<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[14522] CalendarServer/trunk</title>
</head>
<body>
<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; }
#msg dl a { font-weight: bold}
#msg dl a:link { color:#fc3; }
#msg dl a:active { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.calendarserver.org//changeset/14522">14522</a></dd>
<dt>Author</dt> <dd>cdaboo@apple.com</dd>
<dt>Date</dt> <dd>2015-03-06 13:42:49 -0800 (Fri, 06 Mar 2015)</dd>
</dl>
<h3>Log Message</h3>
<pre>Merge pod-2-pod branch to trunk.</pre>
<h3>Modified Paths</h3>
<ul>
<li><a href="#CalendarServertrunkcalendarserverpushapplepushpy">CalendarServer/trunk/calendarserver/push/applepush.py</a></li>
<li><a href="#CalendarServertrunkcalendarserverpushtesttest_applepushpy">CalendarServer/trunk/calendarserver/push/test/test_applepush.py</a></li>
<li><a href="#CalendarServertrunkcalendarserverpushtesttest_notifierpy">CalendarServer/trunk/calendarserver/push/test/test_notifier.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolsexportpy">CalendarServer/trunk/calendarserver/tools/export.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolsprincipalspy">CalendarServer/trunk/calendarserver/tools/principals.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolspurgepy">CalendarServer/trunk/calendarserver/tools/purge.py</a></li>
<li><a href="#CalendarServertrunkcalendarservertoolspushpy">CalendarServer/trunk/calendarserver/tools/push.py</a></li>
<li><a href="#CalendarServertrunkcalendarserverwebadminworkpy">CalendarServer/trunk/calendarserver/webadmin/work.py</a></li>
<li><a href="#CalendarServertrunkrequirementsstabletxt">CalendarServer/trunk/requirements-stable.txt</a></li>
<li><a href="#CalendarServertrunktwistedcaldavresourcepy">CalendarServer/trunk/twistedcaldav/resource.py</a></li>
<li><a href="#CalendarServertrunktwistedcaldavstdconfigpy">CalendarServer/trunk/twistedcaldav/stdconfig.py</a></li>
<li><a href="#CalendarServertrunktwistedcaldavtesttest_wrappingpy">CalendarServer/trunk/twistedcaldav/test/test_wrapping.py</a></li>
<li><a href="#CalendarServertrunktxdavbasedatastoreutilpy">CalendarServer/trunk/txdav/base/datastore/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimipinboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimipoutboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_inboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_mailgatewaypy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_outboundpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingischeduledeliverypy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingtesttest_workpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulingworkpy">CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresqlpy">CalendarServer/trunk/txdav/caldav/datastore/sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresql_externalpy">CalendarServer/trunk/txdav/caldav/datastore/sql_external.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretestcommonpy">CalendarServer/trunk/txdav/caldav/datastore/test/common.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_attachmentspy">CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_sqlpy">CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_sql_sharingpy">CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreutilpy">CalendarServer/trunk/txdav/caldav/datastore/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavicalendarstorepy">CalendarServer/trunk/txdav/caldav/icalendarstore.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoresqlpy">CalendarServer/trunk/txdav/carddav/datastore/sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoresql_externalpy">CalendarServer/trunk/txdav/carddav/datastore/sql_external.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoretesttest_sqlpy">CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcarddavdatastoretesttest_sql_sharingpy">CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorefilepy">CalendarServer/trunk/txdav/common/datastore/file.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingattachmentspy">CalendarServer/trunk/txdav/common/datastore/podding/attachments.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingconduitpy">CalendarServer/trunk/txdav/common/datastore/podding/conduit.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingdirectorypy">CalendarServer/trunk/txdav/common/datastore/podding/directory.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingrequestpy">CalendarServer/trunk/txdav/common/datastore/podding/request.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingresourcepy">CalendarServer/trunk/txdav/common/datastore/podding/resource.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingsharing_invitespy">CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingstore_apipy">CalendarServer/trunk/txdav/common/datastore/podding/store_api.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingtesttest_conduitpy">CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingtesttest_store_apipy">CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingtestutilpy">CalendarServer/trunk/txdav/common/datastore/podding/test/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresqlpy">CalendarServer/trunk/txdav/common/datastore/sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_externalpy">CalendarServer/trunk/txdav/common/datastore/sql_external.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemacurrentoracledialectsql">CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemacurrentsql">CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_tablespy">CalendarServer/trunk/txdav/common/datastore/sql_tables.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoretesttest_sqlpy">CalendarServer/trunk/txdav/common/datastore/test/test_sql.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoretestutilpy">CalendarServer/trunk/txdav/common/datastore/test/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoreupgradesqlupgradescalendar_upgrade_from_2_to_3py">CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoreupgradesqlupgradestesttest_notification_upgrade_from_0_to_1py">CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoreworktesttest_revision_cleanuppy">CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py</a></li>
<li><a href="#CalendarServertrunktxdavcommonicommondatastorepy">CalendarServer/trunk/txdav/common/icommondatastore.py</a></li>
<li><a href="#CalendarServertrunktxdavwhodelegatespy">CalendarServer/trunk/txdav/who/delegates.py</a></li>
<li><a href="#CalendarServertrunktxdavwhogroupspy">CalendarServer/trunk/txdav/who/groups.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_delegatespy">CalendarServer/trunk/txdav/who/test/test_delegates.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_group_attendeespy">CalendarServer/trunk/txdav/who/test/test_group_attendees.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_group_shareespy">CalendarServer/trunk/txdav/who/test/test_group_sharees.py</a></li>
<li><a href="#CalendarServertrunktxdavwhotesttest_groupspy">CalendarServer/trunk/txdav/who/test/test_groups.py</a></li>
</ul>
<h3>Added Paths</h3>
<ul>
<li><a href="#CalendarServertrunkcalendarservertoolspod_migrationpy">CalendarServer/trunk/calendarserver/tools/pod_migration.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresql_attachmentpy">CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoresql_directorypy">CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigration__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapy">CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingutilpy">CalendarServer/trunk/txdav/common/datastore/podding/util.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_apnpy">CalendarServer/trunk/txdav/common/datastore/sql_apn.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_directorypy">CalendarServer/trunk/txdav/common/datastore/sql_directory.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_imippy">CalendarServer/trunk/txdav/common/datastore/sql_imip.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_notificationpy">CalendarServer/trunk/txdav/common/datastore/sql_notification.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaoldoracledialectv51sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaoldpostgresdialectv51sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_51_to_52sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_51_to_52sql">CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_sharingpy">CalendarServer/trunk/txdav/common/datastore/sql_sharing.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastoresql_utilpy">CalendarServer/trunk/txdav/common/datastore/sql_util.py</a></li>
</ul>
<h3>Removed Paths</h3>
<ul>
<li><a href="#CalendarServertrunktxdavcaldavdatastoreschedulepy">CalendarServer/trunk/txdav/caldav/datastore/schedule.py</a></li>
<li><a href="#CalendarServertrunktxdavcaldavdatastoretesttest_schedulepy">CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigration__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapy">CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__py">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py</a></li>
<li>CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/</li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py</a></li>
<li><a href="#CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpy">CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py</a></li>
</ul>
<h3>Property Changed</h3>
<ul>
<li><a href="#CalendarServertrunk">CalendarServer/trunk/</a></li>
</ul>
</div>
<div id="patch">
<h3>Diff</h3>
<a id="CalendarServertrunk"></a>
<div class="propset"><h4>Property changes: CalendarServer/trunk</h4>
<pre class="diff"><span>
</span></pre></div>
<a id="svnmergeinfo"></a>
<div class="modfile"><h4>Modified: svn:mergeinfo</h4></div>
<span class="cx">/CalendarServer/branches/config-separation:4379-4443
</span><span class="cx">/CalendarServer/branches/egg-info-351:4589-4625
</span><span class="cx">/CalendarServer/branches/generic-sqlstore:6167-6191
</span><span class="cx">/CalendarServer/branches/new-store:5594-5934
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile:5911-5935
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
</span><span class="cx">/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
</span><span class="cx">/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
</span><span class="cx">/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
</span><span class="cx">/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
</span><span class="cx">/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
</span><span class="cx">/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
</span><span class="cx">/CalendarServer/branches/users/cdaboo/json:11622-11912
</span><span class="cx">/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
</span><span class="cx">/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
</span><span class="cx">/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
</span><span class="cx">/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pods:7297-7377
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycard:7227-7237
</span><span class="cx">/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
</span><span class="cx">/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
</span><span class="cx">/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
</span><span class="cx">/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
</span><span class="cx">/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
</span><span class="cx">/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
</span><span class="cx">/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
</span><span class="cx">/CalendarServer/branches/users/cdaboo/timezones:7443-7699
</span><span class="cx">/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
</span><span class="cx">/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
</span><span class="cx">/CalendarServer/branches/users/gaya/groupsharee2:13669-13773
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
</span><span class="cx">/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
</span><span class="cx">/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
</span><span class="cx">/CalendarServer/branches/users/glyph/conn-limit:6574-6577
</span><span class="cx">/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
</span><span class="cx">/CalendarServer/branches/users/glyph/dalify:6932-7023
</span><span class="cx">/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
</span><span class="cx">/CalendarServer/branches/users/glyph/deploybuild:7563-7572
</span><span class="cx">/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
</span><span class="cx">/CalendarServer/branches/users/glyph/disable-quota:7718-7727
</span><span class="cx">/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
</span><span class="cx">/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
</span><span class="cx">/CalendarServer/branches/users/glyph/hang-fix:11465-11491
</span><span class="cx">/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
</span><span class="cx">/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
</span><span class="cx">/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
</span><span class="cx">/CalendarServer/branches/users/glyph/linux-tests:6893-6900
</span><span class="cx">/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
</span><span class="cx">/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
</span><span class="cx">/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
</span><span class="cx">/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
</span><span class="cx">/CalendarServer/branches/users/glyph/new-export:7444-7485
</span><span class="cx">/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle:7106-7155
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
</span><span class="cx">/CalendarServer/branches/users/glyph/other-html:8062-8091
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
</span><span class="cx">/CalendarServer/branches/users/glyph/q:9560-9688
</span><span class="cx">/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
</span><span class="cx">/CalendarServer/branches/users/glyph/quota:7604-7637
</span><span class="cx">/CalendarServer/branches/users/glyph/sendfdport:5388-5424
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
</span><span class="cx">/CalendarServer/branches/users/glyph/sharedpool:6490-6550
</span><span class="cx">/CalendarServer/branches/users/glyph/sharing-api:9192-9205
</span><span class="cx">/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
</span><span class="cx">/CalendarServer/branches/users/glyph/sql-store:5929-6073
</span><span class="cx">/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
</span><span class="cx">/CalendarServer/branches/users/glyph/subtransactions:7248-7258
</span><span class="cx">/CalendarServer/branches/users/glyph/table-alias:8651-8664
</span><span class="cx">/CalendarServer/branches/users/glyph/uidexport:7673-7676
</span><span class="cx">/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
</span><span class="cx">/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
</span><span class="cx">/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
</span><span class="cx">/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
</span><span class="cx">/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
</span><span class="cx">/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
</span><span class="cx">/CalendarServer/branches/users/sagen/applepush:8126-8184
</span><span class="cx">/CalendarServer/branches/users/sagen/inboxitems:7380-7381
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources:5032-5051
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who:12819-12860
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-2:12861-12898
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-3:12899-12913
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-4:12914-13157
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-5:13158-13163
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua:13309-13327
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua-1:13328-13330
</span><span class="cx">/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes:13648-13656
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes-2:13657
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
</span><span class="cx">/CalendarServer/branches/users/sagen/resources-2:5084-5093
</span><span class="cx">/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
</span><span class="cx">/CalendarServer/branches/users/wsanchez/psycopg2cffi:14427-14439
</span><span class="cx">/CalendarServer/branches/users/wsanchez/transations:5515-5593
</span><span class="cx"> + /CalDAVTester/trunk:11193-11198
</span><span class="cx">/CalendarServer/branches/config-separation:4379-4443
</span><span class="cx">/CalendarServer/branches/egg-info-351:4589-4625
</span><span class="cx">/CalendarServer/branches/generic-sqlstore:6167-6191
</span><span class="cx">/CalendarServer/branches/new-store:5594-5934
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile:5911-5935
</span><span class="cx">/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
</span><span class="cx">/CalendarServer/branches/release/CalendarServer-5.2-dev:11972,12357-12358,12794,12814
</span><span class="cx">/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
</span><span class="cx">/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
</span><span class="cx">/CalendarServer/branches/users/cdaboo/cross-pod-sharing:12038-12191
</span><span class="cx">/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
</span><span class="cx">/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
</span><span class="cx">/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
</span><span class="cx">/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
</span><span class="cx">/CalendarServer/branches/users/cdaboo/json:11622-11912
</span><span class="cx">/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
</span><span class="cx">/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
</span><span class="cx">/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
</span><span class="cx">/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pod2pod-migration:14338-14520
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pods:7297-7377
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
</span><span class="cx">/CalendarServer/branches/users/cdaboo/pycard:7227-7237
</span><span class="cx">/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
</span><span class="cx">/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
</span><span class="cx">/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
</span><span class="cx">/CalendarServer/branches/users/cdaboo/scheduling-queue-refresh:11783-12557
</span><span class="cx">/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
</span><span class="cx">/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
</span><span class="cx">/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
</span><span class="cx">/CalendarServer/branches/users/cdaboo/timezones:7443-7699
</span><span class="cx">/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
</span><span class="cx">/CalendarServer/branches/users/gaya/cleanrevisions:12152-12334
</span><span class="cx">/CalendarServer/branches/users/gaya/groupsharee2:13669-13773
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroupfixes:12120-12142
</span><span class="cx">/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
</span><span class="cx">/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
</span><span class="cx">/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
</span><span class="cx">/CalendarServer/branches/users/glyph/conn-limit:6574-6577
</span><span class="cx">/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
</span><span class="cx">/CalendarServer/branches/users/glyph/dalify:6932-7023
</span><span class="cx">/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
</span><span class="cx">/CalendarServer/branches/users/glyph/deploybuild:7563-7572
</span><span class="cx">/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
</span><span class="cx">/CalendarServer/branches/users/glyph/disable-quota:7718-7727
</span><span class="cx">/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
</span><span class="cx">/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
</span><span class="cx">/CalendarServer/branches/users/glyph/hang-fix:11465-11491
</span><span class="cx">/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
</span><span class="cx">/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
</span><span class="cx">/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
</span><span class="cx">/CalendarServer/branches/users/glyph/linux-tests:6893-6900
</span><span class="cx">/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
</span><span class="cx">/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
</span><span class="cx">/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
</span><span class="cx">/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
</span><span class="cx">/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
</span><span class="cx">/CalendarServer/branches/users/glyph/new-export:7444-7485
</span><span class="cx">/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle:7106-7155
</span><span class="cx">/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
</span><span class="cx">/CalendarServer/branches/users/glyph/other-html:8062-8091
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
</span><span class="cx">/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
</span><span class="cx">/CalendarServer/branches/users/glyph/q:9560-9688
</span><span class="cx">/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
</span><span class="cx">/CalendarServer/branches/users/glyph/quota:7604-7637
</span><span class="cx">/CalendarServer/branches/users/glyph/sendfdport:5388-5424
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
</span><span class="cx">/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
</span><span class="cx">/CalendarServer/branches/users/glyph/sharedpool:6490-6550
</span><span class="cx">/CalendarServer/branches/users/glyph/sharing-api:9192-9205
</span><span class="cx">/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
</span><span class="cx">/CalendarServer/branches/users/glyph/sql-store:5929-6073
</span><span class="cx">/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
</span><span class="cx">/CalendarServer/branches/users/glyph/subtransactions:7248-7258
</span><span class="cx">/CalendarServer/branches/users/glyph/table-alias:8651-8664
</span><span class="cx">/CalendarServer/branches/users/glyph/uidexport:7673-7676
</span><span class="cx">/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
</span><span class="cx">/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
</span><span class="cx">/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
</span><span class="cx">/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
</span><span class="cx">/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
</span><span class="cx">/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
</span><span class="cx">/CalendarServer/branches/users/sagen/applepush:8126-8184
</span><span class="cx">/CalendarServer/branches/users/sagen/inboxitems:7380-7381
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources:5032-5051
</span><span class="cx">/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who:12819-12860
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-2:12861-12898
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-3:12899-12913
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-4:12914-13157
</span><span class="cx">/CalendarServer/branches/users/sagen/move2who-5:13158-13163
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua:13309-13327
</span><span class="cx">/CalendarServer/branches/users/sagen/newcua-1:13328-13330
</span><span class="cx">/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes:13648-13656
</span><span class="cx">/CalendarServer/branches/users/sagen/recordtypes-2:13657
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
</span><span class="cx">/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
</span><span class="cx">/CalendarServer/branches/users/sagen/resources-2:5084-5093
</span><span class="cx">/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
</span><span class="cx">/CalendarServer/branches/users/wsanchez/psycopg2cffi:14427-14439
</span><span class="cx">/CalendarServer/branches/users/wsanchez/transations:5515-5593
</span><a id="CalendarServertrunkcalendarserverpushapplepushpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/push/applepush.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/push/applepush.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/push/applepush.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -234,10 +234,7 @@
</span><span class="cx"> self.log.debug(
</span><span class="cx"> "Sending %d APNS notifications for %s" %
</span><span class="cx"> (numSubscriptions, pushKey))
</span><del>- tokens = []
- for token, uid in subscriptions:
- if token and uid:
- tokens.append(token)
</del><ins>+ tokens = [record.token for record in subscriptions if record.token and record.subscriberGUID]
</ins><span class="cx"> if tokens:
</span><span class="cx"> provider.scheduleNotifications(
</span><span class="cx"> tokens, pushKey,
</span><span class="lines">@@ -349,11 +346,11 @@
</span><span class="cx"> (token,))
</span><span class="cx"> txn = self.factory.store.newTransaction(label="APNProviderProtocol.processError")
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsByToken(token))
</span><del>- for key, _ignore_modified, _ignore_uid in subscriptions:
</del><ins>+ for record in subscriptions:
</ins><span class="cx"> self.log.debug(
</span><span class="cx"> "Removing subscription: %s %s" %
</span><del>- (token, key))
- yield txn.removeAPNSubscription(token, key)
</del><ins>+ (token, record.resourceKey))
+ yield txn.removeAPNSubscription(token, record.resourceKey)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -746,12 +743,12 @@
</span><span class="cx"> txn = self.factory.store.newTransaction(label="APNFeedbackProtocol.processFeedback")
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsByToken(token))
</span><span class="cx">
</span><del>- for key, modified, _ignore_uid in subscriptions:
- if timestamp > modified:
</del><ins>+ for record in subscriptions:
+ if timestamp > record.modified:
</ins><span class="cx"> self.log.debug(
</span><span class="cx"> "FeedbackProtocol removing subscription: %s %s" %
</span><del>- (token, key))
- yield txn.removeAPNSubscription(token, key)
</del><ins>+ (token, record.resourceKey))
+ yield txn.removeAPNSubscription(token, record.resourceKey)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx">
</span></span></pre></div>
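<p>The applepush.py change above switches the results of apnSubscriptionsByToken() from positional (key, modified, uid) tuples to record objects read by attribute. A minimal sketch of the new pattern, assuming a store and a device token are already in hand; the helper name and transaction label are illustrative, while the calls and the resourceKey/modified attributes are those shown in the diff:</p>
<pre>
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def removeStaleSubscriptions(store, token, timestamp):
    # Illustrative helper: drop every subscription for this token that is
    # older than the given feedback timestamp, as processFeedback() does above.
    txn = store.newTransaction(label="removeStaleSubscriptions")
    records = yield txn.apnSubscriptionsByToken(token)
    for record in records:
        if timestamp > record.modified:
            yield txn.removeAPNSubscription(token, record.resourceKey)
    yield txn.commit()
</pre>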
<a id="CalendarServertrunkcalendarserverpushtesttest_applepushpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/push/test/test_applepush.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/push/test/test_applepush.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/push/test/test_applepush.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -88,6 +88,7 @@
</span><span class="cx"> yield txn.addAPNSubscription(token, key2, timestamp2, uid, userAgent, ipAddr)
</span><span class="cx">
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsBySubscriber(uid))
</span><ins>+ subscriptions = [[record.token, record.resourceKey, record.modified, record.userAgent, record.ipAddr] for record in subscriptions]
</ins><span class="cx"> self.assertTrue([token, key1, timestamp1, userAgent, ipAddr] in subscriptions)
</span><span class="cx"> self.assertTrue([token, key2, timestamp2, userAgent, ipAddr] in subscriptions)
</span><span class="cx"> self.assertTrue([token2, key1, timestamp1, userAgent, ipAddr] in subscriptions)
</span><span class="lines">@@ -98,9 +99,11 @@
</span><span class="cx"> uid2 = "D8FFB335-9D36-4CE8-A3B9-D1859E38C0DA"
</span><span class="cx"> yield txn.addAPNSubscription(token, key2, timestamp3, uid2, userAgent, ipAddr)
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsBySubscriber(uid))
</span><ins>+ subscriptions = [[record.token, record.resourceKey, record.modified, record.userAgent, record.ipAddr] for record in subscriptions]
</ins><span class="cx"> self.assertTrue([token, key1, timestamp1, userAgent, ipAddr] in subscriptions)
</span><span class="cx"> self.assertFalse([token, key2, timestamp3, userAgent, ipAddr] in subscriptions)
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsBySubscriber(uid2))
</span><ins>+ subscriptions = [[record.token, record.resourceKey, record.modified, record.userAgent, record.ipAddr] for record in subscriptions]
</ins><span class="cx"> self.assertTrue([token, key2, timestamp3, userAgent, ipAddr] in subscriptions)
</span><span class="cx"> # Change it back
</span><span class="cx"> yield txn.addAPNSubscription(token, key2, timestamp2, uid, userAgent, ipAddr)
</span><span class="lines">@@ -284,10 +287,10 @@
</span><span class="cx"> txn = self._sqlCalendarStore.newTransaction()
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsByToken(token))
</span><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(
- subscriptions,
- [["/CalDAV/calendars.example.com/user02/calendar/", 3000, "D2256BCC-48E2-42D1-BD89-CBA1E4CCDFFB"]]
- )
</del><ins>+ self.assertEquals(len(subscriptions), 1)
+ self.assertEqual(subscriptions[0].resourceKey, "/CalDAV/calendars.example.com/user02/calendar/")
+ self.assertEqual(subscriptions[0].modified, 3000)
+ self.assertEqual(subscriptions[0].subscriberGUID, "D2256BCC-48E2-42D1-BD89-CBA1E4CCDFFB")
</ins><span class="cx">
</span><span class="cx"> # Verify processError removes associated subscriptions and history
</span><span class="cx"> # First find the id corresponding to token2
</span><span class="lines">@@ -326,7 +329,7 @@
</span><span class="cx"> subscriptions = (yield txn.apnSubscriptionsByToken(token2))
</span><span class="cx"> yield txn.commit()
</span><span class="cx"> self.assertEquals(len(subscriptions), 1)
</span><del>- self.assertEquals(subscriptions[0][0], key2)
</del><ins>+ self.assertEquals(subscriptions[0].resourceKey, key2)
</ins><span class="cx">
</span><span class="cx"> service.stopService()
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunkcalendarserverpushtesttest_notifierpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/push/test/test_notifier.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/push/test/test_notifier.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/push/test/test_notifier.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -297,7 +297,7 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def test_notificationNotifier(self):
</span><span class="cx">
</span><del>- notifications = yield self.transactionUnderTest().notificationsWithUID("user01")
</del><ins>+ notifications = yield self.transactionUnderTest().notificationsWithUID("user01", create=True)
</ins><span class="cx"> yield notifications.notifyChanged(category=ChangeCategory.default)
</span><span class="cx"> self.assertEquals(
</span><span class="cx"> set(self.notifierFactory.history),
</span></span></pre></div>
<a id="CalendarServertrunkcalendarservertoolsexportpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/export.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/export.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/export.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -203,7 +203,7 @@
</span><span class="cx"> for this calendar home.
</span><span class="cx"> """
</span><span class="cx"> uid = yield self.getHomeUID(exportService)
</span><del>- home = yield txn.calendarHomeWithUID(uid, True)
</del><ins>+ home = yield txn.calendarHomeWithUID(uid, create=True)
</ins><span class="cx"> result = []
</span><span class="cx"> if self.collections:
</span><span class="cx"> for collection in self.collections:
</span><span class="lines">@@ -303,6 +303,7 @@
</span><span class="cx"> fileobj.write(comp.getTextWithTimezones(True))
</span><span class="cx">
</span><span class="cx">
</span><ins>+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def exportToDirectory(calendars, dirname):
</span><span class="cx"> """
</span></span></pre></div>
<a id="CalendarServertrunkcalendarservertoolspod_migrationpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationcalendarservertoolspod_migrationpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/calendarserver/tools/pod_migration.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/calendarserver/tools/pod_migration.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/pod_migration.py         (rev 0)
+++ CalendarServer/trunk/calendarserver/tools/pod_migration.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,293 @@
</span><ins>+#!/usr/bin/env python
+# -*- test-case-name: calendarserver.tools.test.test_calverify -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+from __future__ import print_function
+
+"""
+This tool manages an overall pod migration. Migration is done in a series of steps,
+with the system admin triggering each step individually by running this tool.
+"""
+
+import os
+import sys
+
+from twisted.internet.defer import inlineCallbacks
+from twisted.python.text import wordWrap
+from twisted.python.usage import Options, UsageError
+
+from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
+from twistedcaldav.timezones import TimezoneCache
+
+from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
+
+from twext.python.log import Logger
+from twext.who.idirectory import RecordType
+
+from calendarserver.tools.cmdline import utilityMain, WorkerService
+
+
+log = Logger()
+
+VERSION = "1"
+
+
+
+def usage(e=None):
+ if e:
+ print(e)
+ print("")
+ try:
+ PodMigrationOptions().opt_help()
+ except SystemExit:
+ pass
+ if e:
+ sys.exit(64)
+ else:
+ sys.exit(0)
+
+
+description = ''.join(
+ wordWrap(
+ """
+ Usage: calendarserver_pod_migration [options] [input specifiers]
+ """,
+ int(os.environ.get('COLUMNS', '80'))
+ )
+)
+description += "\nVersion: %s" % (VERSION,)
+
+
+
+class ConfigError(Exception):
+ pass
+
+
+
+class PodMigrationOptions(Options):
+ """
+ Command-line options for 'calendarserver_pod_migration'
+ """
+
+ synopsis = description
+
+ optFlags = [
+ ['verbose', 'v', "Verbose logging."],
+ ['debug', 'D', "Debug logging."],
+ ['step1', '1', "Run step 1 of the migration (initial sync)"],
+ ['step2', '2', "Run step 2 of the migration (incremental sync)"],
+ ['step3', '3', "Run step 3 of the migration (prepare for final sync)"],
+ ['step4', '4', "Run step 4 of the migration (final incremental sync)"],
+ ['step5', '5', "Run step 5 of the migration (final reconcile sync)"],
+ ['step6', '6', "Run step 6 of the migration (enable new home)"],
+ ['step7', '7', "Run step 7 of the migration (remove old home)"],
+ ]
+
+ optParameters = [
+ ['config', 'f', DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
+ ['uid', 'u', "", "Directory record uid of user to migrate [REQUIRED]"],
+ ]
+
+ longdesc = "Only one step option is allowed."
+
+ def __init__(self):
+ super(PodMigrationOptions, self).__init__()
+ self.outputName = '-'
+
+
+ def opt_output(self, filename):
+ """
+ Specify output file path (default: '-', meaning stdout).
+ """
+ self.outputName = filename
+
+ opt_o = opt_output
+
+
+ def openOutput(self):
+ """
+ Open the appropriate output file based on the '--output' option.
+ """
+ if self.outputName == '-':
+ return sys.stdout
+ else:
+ return open(self.outputName, 'wb')
+
+
+ def postOptions(self):
+ runstep = None
+ for step in range(7):
+ if self["step{}".format(step + 1)]:
+ if runstep is None:
+ runstep = step
+ self["runstep"] = step + 1
+ else:
+ raise UsageError("Only one step option allowed")
+ else:
+ if runstep is None:
+ raise UsageError("One step option must be present")
+ if not self["uid"]:
+ raise UsageError("A uid is required")
+
+
+
+class PodMigrationService(WorkerService, object):
+ """
+ Service which runs, does its stuff, then stops the reactor.
+ """
+
+ def __init__(self, store, options, output, reactor, config):
+ super(PodMigrationService, self).__init__(store)
+ self.options = options
+ self.output = output
+ self.reactor = reactor
+ self.config = config
+ TimezoneCache.create()
+
+
+ @inlineCallbacks
+ def doWork(self):
+ """
+ Do the work, stopping the reactor when done.
+ """
+ self.output.write("\n---- Pod Migration version: %s ----\n" % (VERSION,))
+
+ # Map short name to uid
+ record = yield self.store.directoryService().recordWithUID(self.options["uid"])
+ if record is None:
+ record = yield self.store.directoryService().recordWithShortName(RecordType.user, self.options["uid"])
+ if record is not None:
+ self.options["uid"] = record.uid
+
+ try:
+ yield getattr(self, "step{}".format(self.options["runstep"]))()
+ self.output.close()
+ except ConfigError:
+ pass
+ except:
+ log.failure("doWork()")
+
+
+ @inlineCallbacks
+ def step1(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 1\n")
+ yield syncer.sync()
+
+
+ @inlineCallbacks
+ def step2(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 2\n")
+ yield syncer.sync()
+
+
+ @inlineCallbacks
+ def step3(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 3\n")
+ yield syncer.disableRemoteHome()
+
+
+ @inlineCallbacks
+ def step4(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ final=True,
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 4\n")
+ yield syncer.sync()
+
+
+ @inlineCallbacks
+ def step5(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ final=True,
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 5\n")
+ yield syncer.finalSync()
+
+
+ @inlineCallbacks
+ def step6(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 6\n")
+ yield syncer.enableLocalHome()
+
+
+ @inlineCallbacks
+ def step7(self):
+ syncer = CrossPodHomeSync(
+ self.store,
+ self.options["uid"],
+ final=True,
+ uselog=self.output if self.options["verbose"] else None
+ )
+ syncer.accounting("Pod Migration Step 7\n")
+ yield syncer.removeRemoteHome()
+
+
+
+def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
+ """
+ Do the export.
+ """
+ if reactor is None:
+ from twisted.internet import reactor
+ options = PodMigrationOptions()
+ try:
+ options.parseOptions(argv[1:])
+ except UsageError as e:
+ stderr.write("Invalid options specified\n")
+ options.opt_help()
+
+ try:
+ output = options.openOutput()
+ except IOError, e:
+ stderr.write("Unable to open output file for writing: %s\n" % (e))
+ sys.exit(1)
+
+
+ def makeService(store):
+ from twistedcaldav.config import config
+ config.TransactionTimeoutSeconds = 0
+ return PodMigrationService(store, options, output, reactor, config)
+
+ utilityMain(options['config'], makeService, reactor, verbose=options["debug"])
+
+if __name__ == '__main__':
+ main()
</ins></span></pre></div>
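<p>The new calendarserver_pod_migration tool runs exactly one migration step per invocation: step 1 initial sync, step 2 incremental sync, step 3 prepare for final sync, step 4 final incremental sync, step 5 final reconcile sync, step 6 enable new home, step 7 remove old home, always together with the directory record uid of the user being migrated. A minimal sketch of the option handling that postOptions() enforces, driving PodMigrationOptions directly; the uid value is a placeholder:</p>
<pre>
from calendarserver.tools.pod_migration import PodMigrationOptions

options = PodMigrationOptions()
# Exactly one --stepN flag plus --uid is required; a second step is rejected.
options.parseOptions(["--step1", "--uid", "some-record-uid"])
print(options["runstep"])   # 1
print(options["uid"])       # some-record-uid
</pre>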
<a id="CalendarServertrunkcalendarservertoolsprincipalspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/principals.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/principals.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/principals.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -775,15 +775,11 @@
</span><span class="cx"> groupUIDs.append(record.uid)
</span><span class="cx">
</span><span class="cx"> for groupUID in groupUIDs:
</span><del>- (
- groupID, name, _ignore_membershipHash, modified, _ignore_extant
- ) = yield txn.groupByUID(
- groupUID
- )
- print("Group: \"{name}\" ({uid})".format(name=name, uid=groupUID))
</del><ins>+ group = yield txn.groupByUID(groupUID)
+ print("Group: \"{name}\" ({uid})".format(name=group.name, uid=group.groupUID))
</ins><span class="cx">
</span><span class="cx"> for txt, readWrite in (("read-only", False), ("read-write", True)):
</span><del>- delegatorUIDs = yield txn.delegatorsToGroup(groupID, readWrite)
</del><ins>+ delegatorUIDs = yield txn.delegatorsToGroup(group.groupID, readWrite)
</ins><span class="cx"> for delegatorUID in delegatorUIDs:
</span><span class="cx"> delegator = yield directory.recordWithUID(delegatorUID)
</span><span class="cx"> print(
</span><span class="lines">@@ -793,12 +789,12 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> print("Group members:")
</span><del>- memberUIDs = yield txn.groupMemberUIDs(groupID)
</del><ins>+ memberUIDs = yield txn.groupMemberUIDs(group.groupID)
</ins><span class="cx"> for memberUID in memberUIDs:
</span><span class="cx"> record = yield directory.recordWithUID(memberUID)
</span><span class="cx"> print(prettyRecord(record))
</span><span class="cx">
</span><del>- print("Last cached: {} GMT".format(modified))
</del><ins>+ print("Last cached: {} GMT".format(group.modified))
</ins><span class="cx"> print()
</span><span class="cx">
</span><span class="cx"> yield txn.commit()
</span></span></pre></div>
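<p>As with the push changes, txn.groupByUID() now returns a single group record read by attribute rather than a positional tuple. A minimal sketch, assuming a transaction and a group UID are in hand; the helper name is illustrative, while groupByUID(), groupMemberUIDs() and the name/groupUID/groupID/modified attributes come from the diff:</p>
<pre>
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def printGroupSummary(txn, groupUID):
    # Illustrative helper mirroring the principals.py listing above.
    group = yield txn.groupByUID(groupUID)
    memberUIDs = yield txn.groupMemberUIDs(group.groupID)
    print("Group: {name} ({uid}), {n} member(s), cached {when} GMT".format(
        name=group.name, uid=group.groupUID, n=len(memberUIDs), when=group.modified,
    ))
</pre>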
<a id="CalendarServertrunkcalendarservertoolspurgepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/purge.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/purge.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/purge.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -992,7 +992,7 @@
</span><span class="cx">
</span><span class="cx"> if not self.dryrun:
</span><span class="cx"> yield storeCalHome.removeUnacceptedShares()
</span><del>- notificationHome = yield txn.notificationsWithUID(storeCalHome.uid(), create=False)
</del><ins>+ notificationHome = yield txn.notificationsWithUID(storeCalHome.uid())
</ins><span class="cx"> if notificationHome is not None:
</span><span class="cx"> yield notificationHome.remove()
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunkcalendarservertoolspushpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/tools/push.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/tools/push.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/tools/push.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -68,9 +68,9 @@
</span><span class="cx"> (yield txn.commit())
</span><span class="cx"> if subscriptions:
</span><span class="cx"> byKey = {}
</span><del>- for token, key, timestamp, userAgent, ipAddr in subscriptions:
- byKey.setdefault(key, []).append((token, timestamp, userAgent, ipAddr))
- for key, tokens in byKey.iteritems():
</del><ins>+ for apnrecord in subscriptions:
+ byKey.setdefault(apnrecord.resourceKey, []).append(apnrecord)
+ for key, apnsrecords in byKey.iteritems():
</ins><span class="cx"> print
</span><span class="cx"> protocol, _ignore_host, path = key.strip("/").split("/", 2)
</span><span class="cx"> resource = {
</span><span class="lines">@@ -89,13 +89,13 @@
</span><span class="cx"> else:
</span><span class="cx"> print("...is subscribed to %s's %s home" % (user, resource),)
</span><span class="cx"> # print(" (key: %s)\n" % (key,))
</span><del>- print("with %d device(s):" % (len(tokens),))
- for token, timestamp, userAgent, ipAddr in tokens:
</del><ins>+ print("with %d device(s):" % (len(apnsrecords),))
+ for apnrecord in apnsrecords:
</ins><span class="cx"> print(" %s\n '%s' from %s\n %s" % (
</span><del>- token, userAgent, ipAddr,
</del><ins>+ apnrecords.token, apnrecords.userAgent, apnrecords.ipAddr,
</ins><span class="cx"> time.strftime(
</span><span class="cx"> "on %a, %d %b %Y at %H:%M:%S %z(%Z)",
</span><del>- time.localtime(timestamp)
</del><ins>+ time.localtime(apnrecords.modified)
</ins><span class="cx"> )
</span><span class="cx"> ))
</span><span class="cx"> else:
</span></span></pre></div>
<a id="CalendarServertrunkcalendarserverwebadminworkpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/calendarserver/webadmin/work.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/calendarserver/webadmin/work.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/calendarserver/webadmin/work.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -157,13 +157,13 @@
</span><span class="cx"> if workType == PushNotificationWork:
</span><span class="cx"> attrs += ("pushID", "priority")
</span><span class="cx"> elif workType == ScheduleOrganizerWork:
</span><del>- attrs += ("icalendarUid", "attendeeCount")
</del><ins>+ attrs += ("icalendarUID", "attendeeCount")
</ins><span class="cx"> elif workType == ScheduleRefreshWork:
</span><del>- attrs += ("icalendarUid", "attendeeCount")
</del><ins>+ attrs += ("icalendarUID", "attendeeCount")
</ins><span class="cx"> elif workType == ScheduleReplyWork:
</span><del>- attrs += ("icalendarUid",)
</del><ins>+ attrs += ("icalendarUID",)
</ins><span class="cx"> elif workType == ScheduleAutoReplyWork:
</span><del>- attrs += ("icalendarUid",)
</del><ins>+ attrs += ("icalendarUID",)
</ins><span class="cx"> elif workType == GroupCacherPollingWork:
</span><span class="cx"> attrs += ()
</span><span class="cx"> elif workType == IMIPPollingWork:
</span></span></pre></div>
<a id="CalendarServertrunkrequirementsstabletxt"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/requirements-stable.txt (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/requirements-stable.txt        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/requirements-stable.txt        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -36,7 +36,7 @@
</span><span class="cx"> #pyOpenSSL
</span><span class="cx"> pycrypto==2.6.1
</span><span class="cx">
</span><del>- --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/trunk@14404#egg=twextpy
</del><ins>+ --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/trunk@14521#egg=twextpy
</ins><span class="cx"> cffi==0.8.6
</span><span class="cx"> pycparser==2.10
</span><span class="cx"> #twisted
</span></span></pre></div>
<a id="CalendarServertrunktwistedcaldavresourcepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/twistedcaldav/resource.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/twistedcaldav/resource.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/twistedcaldav/resource.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -2139,7 +2139,7 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def createNotificationsCollection(self):
</span><span class="cx"> txn = self._associatedTransaction
</span><del>- notifications = yield txn.notificationsWithUID(self._newStoreHome.uid())
</del><ins>+ notifications = yield txn.notificationsWithUID(self._newStoreHome.uid(), create=True)
</ins><span class="cx">
</span><span class="cx"> from twistedcaldav.storebridge import StoreNotificationCollectionResource
</span><span class="cx"> similar = StoreNotificationCollectionResource(
</span></span></pre></div>
<a id="CalendarServertrunktwistedcaldavstdconfigpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/twistedcaldav/stdconfig.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/twistedcaldav/stdconfig.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/twistedcaldav/stdconfig.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -405,6 +405,7 @@
</span><span class="cx"> "Implicit Errors": False,
</span><span class="cx"> "AutoScheduling": False,
</span><span class="cx"> "iSchedule": False,
</span><ins>+ "migration": False,
</ins><span class="cx"> },
</span><span class="cx"> "AccountingPrincipals": [],
</span><span class="cx"> "AccountingLogRoot" : "accounting",
</span></span></pre></div>
<a id="CalendarServertrunktwistedcaldavtesttest_wrappingpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/twistedcaldav/test/test_wrapping.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/twistedcaldav/test/test_wrapping.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/twistedcaldav/test/test_wrapping.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -120,7 +120,7 @@
</span><span class="cx"> record = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
</span><span class="cx"> uid = record.uid
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- home = yield txn.calendarHomeWithUID(uid, True)
</del><ins>+ home = yield txn.calendarHomeWithUID(uid, create=True)
</ins><span class="cx"> cal = yield home.calendarWithName("calendar")
</span><span class="cx"> yield cal.createCalendarObjectWithName(objectName, VComponent.fromString(objectText))
</span><span class="cx"> yield self.commit()
</span><span class="lines">@@ -139,7 +139,7 @@
</span><span class="cx"> record = yield self.directory.recordWithShortName(RecordType.user, u"wsanchez")
</span><span class="cx"> uid = record.uid
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- home = yield txn.addressbookHomeWithUID(uid, True)
</del><ins>+ home = yield txn.addressbookHomeWithUID(uid, create=True)
</ins><span class="cx"> adbk = yield home.addressbookWithName("addressbook")
</span><span class="cx"> yield adbk.createAddressBookObjectWithName(objectName, VCComponent.fromString(objectText))
</span><span class="cx"> yield self.commit()
</span></span></pre></div>
<a id="CalendarServertrunktxdavbasedatastoreutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/base/datastore/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/base/datastore/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/base/datastore/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -86,6 +86,18 @@
</span><span class="cx"> return self.delete(key)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ # Home objects by UID
+
+ def keyForHomeWithUID(self, homeType, ownerUID, status):
+ return "homeWithUID:%s:%s:%s" % (homeType, status, ownerUID)
+
+
+ # Home objects by id
+
+ def keyForHomeWithID(self, homeType, homeResourceID, status):
+ return "homeWithID:%s:%s:%s" % (homeType, status, homeResourceID)
+
+
</ins><span class="cx"> # Home child objects by name
</span><span class="cx">
</span><span class="cx"> def keyForObjectWithName(self, homeResourceID, name):
</span><span class="lines">@@ -100,8 +112,8 @@
</span><span class="cx">
</span><span class="cx"> # Home child objects by external id
</span><span class="cx">
</span><del>- def keyForObjectWithExternalID(self, homeResourceID, externalID):
- return "objectWithExternalID:%s:%s" % (homeResourceID, externalID)
</del><ins>+ def keyForObjectWithBindUID(self, homeResourceID, bindUID):
+ return "objectWithBindUID:%s:%s" % (homeResourceID, bindUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> # Home metadata (Created/Modified)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulepy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/caldav/datastore/schedule.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/schedule.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/schedule.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,214 +0,0 @@
</span><del>-# -*- test-case-name: txdav.caldav.datastore.test.test_scheduling -*-
-##
-# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-from zope.interface.declarations import implements
-from txdav.caldav.icalendarstore import ICalendarHome, ICalendar, ICalendarObject, \
- ICalendarTransaction, ICalendarStore
-
-from twisted.python.util import FancyEqMixin
-from twisted.python.components import proxyForInterface
-from twisted.internet.defer import inlineCallbacks, returnValue
-
-
-
-class ImplicitTransaction(
- proxyForInterface(ICalendarTransaction,
- originalAttribute="_transaction")):
- """
- Wrapper around an L{ICalendarStoreTransaction}.
- """
-
- def __init__(self, transaction):
- """
- Initialize an L{ImplicitTransaction}.
-
- @type transaction: L{ICalendarStoreTransaction}
- """
- self._transaction = transaction
-
-
- @inlineCallbacks
- def calendarHomeWithUID(self, uid, create=False):
- # FIXME: 'create' flag
- newHome = yield super(ImplicitTransaction, self).calendarHomeWithUID(uid, create)
-# return ImplicitCalendarHome(newHome, self)
- if newHome is None:
- returnValue(None)
- else:
- # FIXME: relay transaction
- returnValue(ImplicitCalendarHome(newHome, None))
-
-
-
-class ImplicitCalendarHome(proxyForInterface(ICalendarHome, "_calendarHome")):
-
- implements(ICalendarHome)
-
- def __init__(self, calendarHome, transaction):
- """
- Initialize L{ImplicitCalendarHome} with an underlying
- calendar home and L{ImplicitTransaction}.
- """
- self._calendarHome = calendarHome
- self._transaction = transaction
-
-
-# def properties(self):
-# # FIXME: wrap?
-# return self._calendarHome.properties()
-
- @inlineCallbacks
- def calendars(self):
- superCalendars = (yield super(ImplicitCalendarHome, self).calendars())
- wrapped = []
- for calendar in superCalendars:
- wrapped.append(ImplicitCalendar(self, calendar))
- returnValue(wrapped)
-
-
- @inlineCallbacks
- def loadCalendars(self):
- superCalendars = (yield super(ImplicitCalendarHome, self).loadCalendars())
- wrapped = []
- for calendar in superCalendars:
- wrapped.append(ImplicitCalendar(self, calendar))
- returnValue(wrapped)
-
-
- def createCalendarWithName(self, name):
- self._calendarHome.createCalendarWithName(name)
-
-
- def removeCalendarWithName(self, name):
- self._calendarHome.removeCalendarWithName(name)
-
-
- @inlineCallbacks
- def calendarWithName(self, name):
- calendar = yield self._calendarHome.calendarWithName(name)
- if calendar is not None:
- returnValue(ImplicitCalendar(self, calendar))
- else:
- returnValue(None)
-
-
- def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, type):
- return self._calendarHome.hasCalendarResourceUIDSomewhereElse(uid, ok_object, type)
-
-
- def getCalendarResourcesForUID(self, uid):
- return self._calendarHome.getCalendarResourcesForUID(uid)
-
-
-
-class ImplicitCalendarObject(object):
- implements(ICalendarObject)
-
- def setComponent(self, component):
- pass
-
-
- def component(self):
- pass
-
-
- def uid(self):
- pass
-
-
- def componentType(self):
- pass
-
-
- def organizer(self):
- pass
-
-
- def properties(self):
- pass
-
-
-
-class ImplicitCalendar(FancyEqMixin,
- proxyForInterface(ICalendar, "_subCalendar")):
-
- compareAttributes = (
- "_subCalendar",
- "_parentHome",
- )
-
- def __init__(self, parentHome, subCalendar):
- self._parentHome = parentHome
- self._subCalendar = subCalendar
- self._supportedComponents = None
-
-# def ownerCalendarHome(self):
-# return self._parentHome
-# def calendarObjects(self):
-# # FIXME: wrap
-# return self._subCalendar.calendarObjects()
-# def calendarObjectWithUID(self, uid): ""
-# def createCalendarObjectWithName(self, name, component):
-# # FIXME: implement most of StoreCalendarObjectResource here!
-# self._subCalendar.createCalendarObjectWithName(name, component)
-# def syncToken(self): ""
-# def calendarObjectsInTimeRange(self, start, end, timeZone): ""
-# def calendarObjectsSinceToken(self, token): ""
-# def properties(self):
-# # FIXME: probably need to wrap this as well
-# return self._subCalendar.properties()
-#
-# def calendarObjectWithName(self, name):
-# #FIXME: wrap
-# return self._subCalendar.calendarObjectWithName(name)
-
-
- def _createCalendarObjectWithNameInternal(self, name, component, internal_state, options=None):
- return self.createCalendarObjectWithName(name, component, options)
-
-
- def setSupportedComponents(self, supported_components):
- """
- Update the database column with the supported components. Technically this should only happen once
- on collection creation, but for migration we may need to change after the fact - hence a separate api.
- """
- self._supportedComponents = supported_components
-
-
- def getSupportedComponents(self):
- return self._supportedComponents
-
-
-
-class ImplicitStore(proxyForInterface(ICalendarStore, "_calendarStore")):
- """
- This is a wrapper around an L{ICalendarStore} that implements implicit
- scheduling.
- """
-
- def __init__(self, calendarStore):
- """
- Create an L{ImplicitStore} wrapped around another
- L{ICalendarStore} provider.
- """
- self._calendarStore = calendarStore
-
-
- def newTransaction(self, label="unlabeled"):
- """
- Wrap an underlying L{ITransaction}.
- """
- return ImplicitTransaction(self._calendarStore.newTransaction(label))
</del></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimipinboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/inbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -260,11 +260,11 @@
</span><span class="cx"> return
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction(label="MailReceiver.processDSN")
</span><del>- result = (yield txn.imipLookupByToken(token))
</del><ins>+ records = (yield txn.imipLookupByToken(token))
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> try:
</span><span class="cx"> # Note the results are returned as utf-8 encoded strings
</span><del>- organizer, attendee, _ignore_icaluid = result[0]
</del><ins>+ record = records[0]
</ins><span class="cx"> except:
</span><span class="cx"> # This isn't a token we recognize
</span><span class="cx"> log.error(
</span><span class="lines">@@ -272,7 +272,7 @@
</span><span class="cx"> % (token, msgId))
</span><span class="cx"> returnValue(self.UNKNOWN_TOKEN)
</span><span class="cx">
</span><del>- calendar.removeAllButOneAttendee(attendee)
</del><ins>+ calendar.removeAllButOneAttendee(record.attendee)
</ins><span class="cx"> calendar.getOrganizerProperty().setValue(organizer)
</span><span class="cx"> for comp in calendar.subcomponents():
</span><span class="cx"> if comp.name() == "VEVENT":
</span><span class="lines">@@ -288,8 +288,11 @@
</span><span class="cx"> log.warn("Mail gateway processing DSN %s" % (msgId,))
</span><span class="cx"> txn = self.store.newTransaction(label="MailReceiver.processDSN")
</span><span class="cx"> yield txn.enqueue(
</span><del>- IMIPReplyWork, organizer=organizer, attendee=attendee,
- icalendarText=str(calendar))
</del><ins>+ IMIPReplyWork,
+ organizer=record.organizer,
+ attendee=record.attendee,
+ icalendarText=str(calendar)
+ )
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> returnValue(self.INJECTION_SUBMITTED)
</span><span class="cx">
</span><span class="lines">@@ -313,11 +316,11 @@
</span><span class="cx"> returnValue(self.MALFORMED_TO_ADDRESS)
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction(label="MailReceiver.processReply")
</span><del>- result = (yield txn.imipLookupByToken(token))
</del><ins>+ records = (yield txn.imipLookupByToken(token))
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> try:
</span><span class="cx"> # Note the results are returned as utf-8 encoded strings
</span><del>- organizer, attendee, _ignore_icaluid = result[0]
</del><ins>+ record = records[0]
</ins><span class="cx"> except:
</span><span class="cx"> # This isn't a token we recognize
</span><span class="cx"> log.error(
</span><span class="lines">@@ -337,11 +340,11 @@
</span><span class="cx"> "in message %s" % (msg['Message-ID'],))
</span><span class="cx">
</span><span class="cx"> toAddr = None
</span><del>- fromAddr = attendee[7:]
- if organizer.startswith("mailto:"):
- toAddr = organizer[7:]
- elif organizer.startswith("urn:x-uid:"):
- uid = organizer[10:]
</del><ins>+ fromAddr = record.attendee[7:]
+ if record.organizer.startswith("mailto:"):
+ toAddr = record.organizer[7:]
+ elif record.organizer.startswith("urn:x-uid:"):
+ uid = record.organizer[10:]
</ins><span class="cx"> record = yield self.directory.recordWithUID(uid)
</span><span class="cx"> try:
</span><span class="cx"> if record and record.emailAddresses:
</span><span class="lines">@@ -376,23 +379,23 @@
</span><span class="cx"> calendar = Component.fromString(calBody)
</span><span class="cx"> event = calendar.mainComponent()
</span><span class="cx">
</span><del>- calendar.removeAllButOneAttendee(attendee)
</del><ins>+ calendar.removeAllButOneAttendee(record.attendee)
</ins><span class="cx"> organizerProperty = calendar.getOrganizerProperty()
</span><span class="cx"> if organizerProperty is None:
</span><span class="cx"> # ORGANIZER is required per rfc2446 section 3.2.3
</span><span class="cx"> log.warn(
</span><span class="cx"> "Mail gateway didn't find an ORGANIZER in REPLY %s"
</span><span class="cx"> % (msg['Message-ID'],))
</span><del>- event.addProperty(Property("ORGANIZER", organizer))
</del><ins>+ event.addProperty(Property("ORGANIZER", record.organizer))
</ins><span class="cx"> else:
</span><del>- organizerProperty.setValue(organizer)
</del><ins>+ organizerProperty.setValue(record.organizer)
</ins><span class="cx">
</span><span class="cx"> if not calendar.getAttendees():
</span><span class="cx"> # The attendee we're expecting isn't there, so add it back
</span><span class="cx"> # with a SCHEDULE-STATUS of SERVICE_UNAVAILABLE.
</span><span class="cx"> # The organizer will then see that the reply was not successful.
</span><span class="cx"> attendeeProp = Property(
</span><del>- "ATTENDEE", attendee,
</del><ins>+ "ATTENDEE", record.attendee,
</ins><span class="cx"> params={
</span><span class="cx"> "SCHEDULE-STATUS": iTIPRequestStatus.SERVICE_UNAVAILABLE,
</span><span class="cx"> }
</span><span class="lines">@@ -406,8 +409,11 @@
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction(label="MailReceiver.processReply")
</span><span class="cx"> yield txn.enqueue(
</span><del>- IMIPReplyWork, organizer=organizer, attendee=attendee,
- icalendarText=str(calendar))
</del><ins>+ IMIPReplyWork,
+ organizer=record.organizer,
+ attendee=record.attendee,
+ icalendarText=str(calendar)
+ )
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> returnValue(self.INJECTION_SUBMITTED)
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimipoutboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/outbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -364,28 +364,29 @@
</span><span class="cx">
</span><span class="cx"> # Reuse or generate a token based on originator, toAddr, and
</span><span class="cx"> # event uid
</span><del>- token = (yield txn.imipGetToken(originator, toAddr.lower(), icaluid))
- if token is None:
</del><ins>+ record = (yield txn.imipGetToken(originator, toAddr.lower(), icaluid))
+ if record is None:
</ins><span class="cx">
</span><span class="cx"> # Because in the past the originator was sometimes in mailto:
</span><span class="cx"> # form, lookup an existing token by mailto: as well
</span><span class="cx"> organizerProperty = calendar.getOrganizerProperty()
</span><span class="cx"> organizerEmailAddress = organizerProperty.parameterValue("EMAIL", None)
</span><span class="cx"> if organizerEmailAddress is not None:
</span><del>- token = (yield txn.imipGetToken("mailto:%s" % (organizerEmailAddress.lower(),), toAddr.lower(), icaluid))
</del><ins>+ record = (yield txn.imipGetToken("mailto:%s" % (organizerEmailAddress.lower(),), toAddr.lower(), icaluid))
</ins><span class="cx">
</span><del>- if token is None:
- token = (yield txn.imipCreateToken(originator, toAddr.lower(), icaluid))
</del><ins>+ if record is None:
+ record = (yield txn.imipCreateToken(originator, toAddr.lower(), icaluid))
</ins><span class="cx"> self.log.debug("Mail gateway created token %s for %s "
</span><span class="cx"> "(originator), %s (recipient) and %s (icaluid)"
</span><del>- % (token, originator, toAddr, icaluid))
</del><ins>+ % (record.token, originator, toAddr, icaluid))
</ins><span class="cx"> inviteState = "new"
</span><span class="cx">
</span><span class="cx"> else:
</span><span class="cx"> self.log.debug("Mail gateway reusing token %s for %s "
</span><span class="cx"> "(originator), %s (recipient) and %s (icaluid)"
</span><del>- % (token, originator, toAddr, icaluid))
</del><ins>+ % (record.token, originator, toAddr, icaluid))
</ins><span class="cx"> inviteState = "update"
</span><ins>+ token = record.token
</ins><span class="cx">
</span><span class="cx"> fullServerAddress = self.address
</span><span class="cx"> _ignore_name, serverAddress = email.utils.parseaddr(fullServerAddress)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_inboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -175,13 +175,13 @@
</span><span class="cx">
</span><span class="cx"> # Make sure a known token *is* processed
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token = (yield txn.imipCreateToken(
</del><ins>+ record = (yield txn.imipCreateToken(
</ins><span class="cx"> "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
</span><span class="cx"> "mailto:user02@example.com",
</span><span class="cx"> "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C"
</span><span class="cx"> ))
</span><span class="cx"> yield txn.commit()
</span><del>- calBody = template % token
</del><ins>+ calBody = template % record.token
</ins><span class="cx"> result = (yield self.receiver.processDSN(calBody, "xyzzy"))
</span><span class="cx"> self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_mailgatewaypy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_mailgateway.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -44,9 +44,8 @@
</span><span class="cx"> "icaluid1", token="token1")
</span><span class="cx"> yield migrateTokensToStore(self.path, self.store)
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- results = yield (txn.imipLookupByToken("token1"))
- organizer, attendee, icaluid = results[0]
</del><ins>+ records = yield (txn.imipLookupByToken("token1"))
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(organizer, "urn:uuid:user01")
- self.assertEquals(attendee, "mailto:attendee@example.com")
- self.assertEquals(icaluid, "icaluid1")
</del><ins>+ self.assertEquals(records[0].organizer, "urn:uuid:user01")
+ self.assertEquals(records[0].attendee, "mailto:attendee@example.com")
+ self.assertEquals(records[0].icaluid, "icaluid1")
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingimiptesttest_outboundpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/imip/test/test_outbound.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -316,17 +316,17 @@
</span><span class="cx"> yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token = (yield txn.imipGetToken(
</del><ins>+ record = (yield txn.imipGetToken(
</ins><span class="cx"> ORGANIZER,
</span><span class="cx"> ATTENDEE,
</span><span class="cx"> ICALUID
</span><span class="cx"> ))
</span><del>- self.assertTrue(token)
- organizer, attendee, icaluid = (yield txn.imipLookupByToken(token))[0]
</del><ins>+ self.assertTrue(record is not None)
+ record = (yield txn.imipLookupByToken(record.token))[0]
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(organizer, ORGANIZER)
- self.assertEquals(attendee, ATTENDEE)
- self.assertEquals(icaluid, ICALUID)
</del><ins>+ self.assertEquals(record.organizer, ORGANIZER)
+ self.assertEquals(record.attendee, ATTENDEE)
+ self.assertEquals(record.icaluid, ICALUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -492,12 +492,12 @@
</span><span class="cx"> if UID: # The organizer is local, and server is sending to remote
</span><span class="cx"> # attendee
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</del><ins>+ record = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertNotEquals(token, None)
</del><ins>+ self.assertNotEquals(record, None)
</ins><span class="cx"> self.assertEquals(
</span><span class="cx"> msg["Reply-To"],
</span><del>- "server+%s@example.com" % (token,))
</del><ins>+ "server+%s@example.com" % (record.token,))
</ins><span class="cx">
</span><span class="cx"> # Make sure attendee property for organizer exists and matches
</span><span class="cx"> # the CUA of the organizer property
</span><span class="lines">@@ -529,31 +529,31 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def test_tokens(self):
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token = (yield txn.imipLookupByToken("xyzzy"))
</del><ins>+ self.assertEquals((yield txn.imipLookupByToken("xyzzy")), [])
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(token, [])
</del><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token1 = (yield txn.imipCreateToken("organizer", "attendee", "icaluid"))
</del><ins>+ record1 = (yield txn.imipCreateToken("organizer", "attendee", "icaluid"))
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token2 = (yield txn.imipGetToken("organizer", "attendee", "icaluid"))
</del><ins>+ record2 = (yield txn.imipGetToken("organizer", "attendee", "icaluid"))
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(token1, token2)
</del><ins>+ self.assertEquals(record1.token, record2.token)
</ins><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction()
</span><ins>+ record = (yield txn.imipLookupByToken(record1.token))[0]
</ins><span class="cx"> self.assertEquals(
</span><del>- map(list, (yield txn.imipLookupByToken(token1))),
- [["organizer", "attendee", "icaluid"]])
</del><ins>+ [record.organizer, record.attendee, record.icaluid],
+ ["organizer", "attendee", "icaluid"])
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- yield txn.imipRemoveToken(token1)
</del><ins>+ yield txn.imipRemoveToken(record1.token)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- self.assertEquals((yield txn.imipLookupByToken(token1)), [])
</del><ins>+ self.assertEquals((yield txn.imipLookupByToken(record1.token)), [])
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -568,7 +568,7 @@
</span><span class="cx"> # Explictly store a token with mailto: CUA for organizer
</span><span class="cx"> # (something that doesn't happen any more, but did in the past)
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- origToken = (yield txn.imipCreateToken(
</del><ins>+ origRecord = (yield txn.imipCreateToken(
</ins><span class="cx"> organizerEmail,
</span><span class="cx"> "mailto:attendee@example.com",
</span><span class="cx"> "CFDD5E46-4F74-478A-9311-B3FF905449C3"
</span><span class="lines">@@ -588,15 +588,15 @@
</span><span class="cx">
</span><span class="cx"> # Verify we didn't create a new token...
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</del><ins>+ record = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(token, None)
</del><ins>+ self.assertEquals(record, None)
</ins><span class="cx">
</span><span class="cx"> # But instead kept the old one...
</span><span class="cx"> txn = self.store.newTransaction()
</span><del>- token = (yield txn.imipGetToken(organizerEmail, inputRecipient, UID))
</del><ins>+ record = (yield txn.imipGetToken(organizerEmail, inputRecipient, UID))
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals(token, origToken)
</del><ins>+ self.assertEquals(record.token, origRecord.token)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> def generateSampleEmail(self, caltext=initialInviteText):
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingischeduledeliverypy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/ischedule/delivery.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -448,15 +448,6 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _processRequest(self, ssl, host, port, path):
</span><del>- from twisted.internet import reactor
- f = Factory()
- f.protocol = HTTPClientProtocol
- if ssl:
- ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory())
- else:
- ep = GAIEndpoint(reactor, host, port)
- proto = (yield ep.connect(f))
-
</del><span class="cx"> if not self.server.podding() and config.Scheduling.iSchedule.DKIM.Enabled:
</span><span class="cx"> domain, selector, key_file, algorithm, useDNSKey, useHTTPKey, usePrivateExchangeKey, expire = DKIMUtils.getConfiguration(config)
</span><span class="cx"> request = DKIMRequest(
</span><span class="lines">@@ -481,6 +472,21 @@
</span><span class="cx"> if accountingEnabledForCategory("iSchedule"):
</span><span class="cx"> self.loggedRequest = yield self.logRequest(request)
</span><span class="cx">
</span><ins>+ response = yield self._submitRequest(ssl, host, port, request)
+ returnValue(response)
+
+
+ @inlineCallbacks
+ def _submitRequest(self, ssl, host, port, request):
+ from twisted.internet import reactor
+ f = Factory()
+ f.protocol = HTTPClientProtocol
+ if ssl:
+ ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory())
+ else:
+ ep = GAIEndpoint(reactor, host, port)
+ proto = (yield ep.connect(f))
+
</ins><span class="cx"> response = (yield proto.submitRequest(request))
</span><span class="cx">
</span><span class="cx"> returnValue(response)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingtesttest_workpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/test/test_work.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -250,7 +250,7 @@
</span><span class="cx">
</span><span class="cx"> work = yield jobs[0].workItem()
</span><span class="cx"> self.assertTrue(isinstance(work, ScheduleOrganizerWork))
</span><del>- self.assertEqual(work.icalendarUid, "12345-67890")
</del><ins>+ self.assertEqual(work.icalendarUID, "12345-67890")
</ins><span class="cx"> self.assertEqual(scheduleActionFromSQL[work.scheduleAction], "create")
</span><span class="cx">
</span><span class="cx"> yield work.delete()
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreschedulingworkpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -77,7 +77,7 @@
</span><span class="cx">
</span><span class="cx"> baseargs = {
</span><span class="cx"> "jobID": kwargs.pop("jobID"),
</span><del>- "icalendarUid": kwargs.pop("icalendarUid"),
</del><ins>+ "icalendarUID": kwargs.pop("icalendarUID"),
</ins><span class="cx"> "workType": cls.workType()
</span><span class="cx"> }
</span><span class="cx">
</span><span class="lines">@@ -121,7 +121,7 @@
</span><span class="cx"> # cause deadlocks if done in the wrong order
</span><span class="cx">
</span><span class="cx"> # Row level lock on this item
</span><del>- locked = yield self.baseWork.trylock(ScheduleWork.icalendarUid == self.icalendarUid)
</del><ins>+ locked = yield self.baseWork.trylock(ScheduleWork.icalendarUID == self.icalendarUID)
</ins><span class="cx"> if locked:
</span><span class="cx"> yield self.trylock()
</span><span class="cx"> returnValue(locked)
</span><span class="lines">@@ -136,7 +136,7 @@
</span><span class="cx"> """
</span><span class="cx"> self.__dict__["baseWork"] = baseWork
</span><span class="cx"> self.__dict__["jobID"] = baseWork.jobID
</span><del>- self.__dict__["icalendarUid"] = baseWork.icalendarUid
</del><ins>+ self.__dict__["icalendarUID"] = baseWork.icalendarUID
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> def delete(self):
</span><span class="lines">@@ -174,7 +174,7 @@
</span><span class="cx"> if self.workType() == ScheduleOrganizerSendWork.workType():
</span><span class="cx"> all = yield self.baseWork.query(
</span><span class="cx"> self.transaction,
</span><del>- (ScheduleWork.icalendarUid == self.icalendarUid).And(ScheduleWork.workID != self.workID),
</del><ins>+ (ScheduleWork.icalendarUID == self.icalendarUID).And(ScheduleWork.workID != self.workID),
</ins><span class="cx"> order=ScheduleWork.workID,
</span><span class="cx"> limit=1,
</span><span class="cx"> )
</span><span class="lines">@@ -183,7 +183,7 @@
</span><span class="cx"> if work.workType == self.workType():
</span><span class="cx"> job = yield JobItem.load(self.transaction, work.jobID)
</span><span class="cx"> yield job.update(notBefore=datetime.datetime.utcnow())
</span><del>- log.debug("ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'", id=work.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleOrganizerSendWork - promoted job: {id}, UID: '{uid}'", id=work.workID, uid=self.icalendarUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="lines">@@ -323,7 +323,7 @@
</span><span class="cx"> proposal = (yield txn.enqueue(
</span><span class="cx"> cls,
</span><span class="cx"> notBefore=notBefore,
</span><del>- icalendarUid=uid,
</del><ins>+ icalendarUID=uid,
</ins><span class="cx"> scheduleAction=scheduleActionToSQL[action],
</span><span class="cx"> homeResourceID=home.id(),
</span><span class="cx"> resourceID=resource.id() if resource else None,
</span><span class="lines">@@ -347,10 +347,10 @@
</span><span class="cx"> calendar_old = Component.fromString(self.icalendarTextOld) if self.icalendarTextOld else None
</span><span class="cx"> calendar_new = Component.fromString(self.icalendarTextNew) if self.icalendarTextNew else None
</span><span class="cx">
</span><del>- log.debug("ScheduleOrganizerWork - running for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUid, org=organizer)
</del><ins>+ log.debug("ScheduleOrganizerWork - running for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUID, org=organizer)
</ins><span class="cx">
</span><span class="cx"> # We need to get the UID lock for implicit processing.
</span><del>- yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(self.icalendarUid).hexdigest(),))
</del><ins>+ yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(self.icalendarUID).hexdigest(),))
</ins><span class="cx">
</span><span class="cx"> from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
</span><span class="cx"> scheduler = ImplicitScheduler()
</span><span class="lines">@@ -359,7 +359,7 @@
</span><span class="cx"> scheduleActionFromSQL[self.scheduleAction],
</span><span class="cx"> home,
</span><span class="cx"> resource,
</span><del>- self.icalendarUid,
</del><ins>+ self.icalendarUID,
</ins><span class="cx"> calendar_old,
</span><span class="cx"> calendar_new,
</span><span class="cx"> self.smartMerge
</span><span class="lines">@@ -368,15 +368,15 @@
</span><span class="cx"> self._dequeued()
</span><span class="cx">
</span><span class="cx"> except Exception, e:
</span><del>- log.debug("ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUid, err=str(e))
</del><ins>+ log.debug("ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUID, err=str(e))
</ins><span class="cx"> log.debug(traceback.format_exc())
</span><span class="cx"> raise
</span><span class="cx"> except:
</span><del>- log.debug("ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUID)
</ins><span class="cx"> log.debug(traceback.format_exc())
</span><span class="cx"> raise
</span><span class="cx">
</span><del>- log.debug("ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUid, org=organizer)
</del><ins>+ log.debug("ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUID, org=organizer)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -418,7 +418,7 @@
</span><span class="cx"> proposal = (yield txn.enqueue(
</span><span class="cx"> cls,
</span><span class="cx"> notBefore=notBefore,
</span><del>- icalendarUid=uid,
</del><ins>+ icalendarUID=uid,
</ins><span class="cx"> scheduleAction=scheduleActionToSQL[action],
</span><span class="cx"> homeResourceID=home.id(),
</span><span class="cx"> resourceID=resource.id() if resource else None,
</span><span class="lines">@@ -449,13 +449,13 @@
</span><span class="cx"> log.debug(
</span><span class="cx"> "ScheduleOrganizerSendWork - running for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}",
</span><span class="cx"> id=self.workID,
</span><del>- uid=self.icalendarUid,
</del><ins>+ uid=self.icalendarUID,
</ins><span class="cx"> org=organizer,
</span><span class="cx"> att=self.attendee
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> # We need to get the UID lock for implicit processing.
</span><del>- yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(self.icalendarUid).hexdigest(),))
</del><ins>+ yield NamedLock.acquire(self.transaction, "ImplicitUIDLock:%s" % (hashlib.md5(self.icalendarUID).hexdigest(),))
</ins><span class="cx">
</span><span class="cx"> from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
</span><span class="cx"> scheduler = ImplicitScheduler()
</span><span class="lines">@@ -464,7 +464,7 @@
</span><span class="cx"> scheduleActionFromSQL[self.scheduleAction],
</span><span class="cx"> home,
</span><span class="cx"> resource,
</span><del>- self.icalendarUid,
</del><ins>+ self.icalendarUID,
</ins><span class="cx"> organizer,
</span><span class="cx"> self.attendee,
</span><span class="cx"> itipmsg,
</span><span class="lines">@@ -486,18 +486,18 @@
</span><span class="cx"> self._dequeued()
</span><span class="cx">
</span><span class="cx"> except Exception, e:
</span><del>- log.debug("ScheduleOrganizerSendWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUid, err=str(e))
</del><ins>+ log.debug("ScheduleOrganizerSendWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUID, err=str(e))
</ins><span class="cx"> log.debug(traceback.format_exc())
</span><span class="cx"> raise
</span><span class="cx"> except:
</span><del>- log.debug("ScheduleOrganizerSendWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleOrganizerSendWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUID)
</ins><span class="cx"> log.debug(traceback.format_exc())
</span><span class="cx"> raise
</span><span class="cx">
</span><span class="cx"> log.debug(
</span><span class="cx"> "ScheduleOrganizerSendWork - for ID: {id}, UID: {uid}, organizer: {org}, attendee: {att}",
</span><span class="cx"> id=self.workID,
</span><del>- uid=self.icalendarUid,
</del><ins>+ uid=self.icalendarUID,
</ins><span class="cx"> org=organizer,
</span><span class="cx"> att=self.attendee
</span><span class="cx"> )
</span><span class="lines">@@ -521,7 +521,7 @@
</span><span class="cx"> proposal = (yield txn.enqueue(
</span><span class="cx"> cls,
</span><span class="cx"> notBefore=notBefore,
</span><del>- icalendarUid=uid,
</del><ins>+ icalendarUID=uid,
</ins><span class="cx"> homeResourceID=home.id(),
</span><span class="cx"> resourceID=resource.id() if resource else None,
</span><span class="cx"> itipMsg=itipmsg.getTextWithTimezones(includeTimezones=not config.EnableTimezonesByReference),
</span><span class="lines">@@ -649,7 +649,7 @@
</span><span class="cx"> notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchDelaySeconds)
</span><span class="cx"> proposal = (yield txn.enqueue(
</span><span class="cx"> cls,
</span><del>- icalendarUid=organizer_resource.uid(),
</del><ins>+ icalendarUID=organizer_resource.uid(),
</ins><span class="cx"> homeResourceID=organizer_resource._home.id(),
</span><span class="cx"> resourceID=organizer_resource.id(),
</span><span class="cx"> attendeeCount=len(attendees),
</span><span class="lines">@@ -676,7 +676,7 @@
</span><span class="cx"> log.debug("Schedule refresh for resource-id: {rid} - ignored", rid=self.resourceID)
</span><span class="cx"> returnValue(None)
</span><span class="cx">
</span><del>- log.debug("ScheduleRefreshWork - running for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleRefreshWork - running for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)
</ins><span class="cx">
</span><span class="cx"> # Get the unique list of pending attendees and split into batch to process
</span><span class="cx"> # TODO: do a DELETE ... and rownum <= N returning attendee - but have to fix Oracle to
</span><span class="lines">@@ -707,7 +707,7 @@
</span><span class="cx"> notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchIntervalSeconds)
</span><span class="cx"> yield self.transaction.enqueue(
</span><span class="cx"> self.__class__,
</span><del>- icalendarUid=self.icalendarUid,
</del><ins>+ icalendarUID=self.icalendarUID,
</ins><span class="cx"> homeResourceID=self.homeResourceID,
</span><span class="cx"> resourceID=self.resourceID,
</span><span class="cx"> attendeeCount=len(pendingAttendees),
</span><span class="lines">@@ -721,7 +721,7 @@
</span><span class="cx">
</span><span class="cx"> self._dequeued()
</span><span class="cx">
</span><del>- log.debug("ScheduleRefreshWork - done for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleRefreshWork - done for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -790,7 +790,7 @@
</span><span class="cx"> notBefore = datetime.datetime.utcnow() + datetime.timedelta(seconds=config.Scheduling.Options.WorkQueues.AutoReplyDelaySeconds)
</span><span class="cx"> proposal = (yield txn.enqueue(
</span><span class="cx"> cls,
</span><del>- icalendarUid=resource.uid(),
</del><ins>+ icalendarUID=resource.uid(),
</ins><span class="cx"> homeResourceID=resource._home.id(),
</span><span class="cx"> resourceID=resource.id(),
</span><span class="cx"> partstat=partstat,
</span><span class="lines">@@ -803,7 +803,7 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def doWork(self):
</span><span class="cx">
</span><del>- log.debug("ScheduleAutoReplyWork - running for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleAutoReplyWork - running for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)
</ins><span class="cx">
</span><span class="cx"> # Delete all other work items with the same pushID
</span><span class="cx"> yield Delete(
</span><span class="lines">@@ -816,7 +816,7 @@
</span><span class="cx">
</span><span class="cx"> self._dequeued()
</span><span class="cx">
</span><del>- log.debug("ScheduleAutoReplyWork - done for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUid)
</del><ins>+ log.debug("ScheduleAutoReplyWork - done for ID: {id}, UID: {uid}", id=self.workID, uid=self.icalendarUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -26,19 +26,17 @@
</span><span class="cx"> "CalendarObject",
</span><span class="cx"> ]
</span><span class="cx">
</span><del>-from twext.enterprise.dal.record import fromTable
</del><ins>+from twext.enterprise.dal.record import fromTable, SerializableRecord
</ins><span class="cx"> from twext.enterprise.dal.syntax import Count, ColumnSyntax, Delete, \
</span><span class="cx"> Insert, Len, Max, Parameter, Select, Update, utcNowSQL
</span><span class="cx"> from twext.enterprise.locking import NamedLock
</span><span class="cx"> from twext.enterprise.jobqueue import WorkItem, AggregatedWorkItem, \
</span><span class="cx"> WORK_PRIORITY_LOW, WORK_WEIGHT_5, WORK_WEIGHT_3
</span><del>-from twext.enterprise.util import parseSQLTimestamp
</del><span class="cx"> from twext.python.clsprop import classproperty
</span><del>-from twext.python.filepath import CachingFilePath
</del><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> from twext.who.idirectory import RecordType
</span><span class="cx"> from twistedcaldav.ical import Component as VComponent
</span><del>-from txweb2.http_headers import MimeType, generateContentType
</del><ins>+from txweb2.http_headers import MimeType
</ins><span class="cx"> from txweb2.stream import readStream
</span><span class="cx">
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue, succeed
</span><span class="lines">@@ -48,11 +46,10 @@
</span><span class="cx"> from twistedcaldav import customxml, ical
</span><span class="cx"> from twistedcaldav.stdconfig import config
</span><span class="cx"> from twistedcaldav.datafilters.peruserdata import PerUserDataFilter
</span><del>-from twistedcaldav.dateops import normalizeForIndex, datetimeMktime, \
</del><ins>+from twistedcaldav.dateops import normalizeForIndex, \
</ins><span class="cx"> pyCalendarTodatetime, parseSQLDateToPyCalendar
</span><span class="cx"> from twistedcaldav.ical import Component, InvalidICalendarDataError, Property
</span><span class="cx"> from twistedcaldav.instance import InvalidOverriddenInstanceError
</span><del>-from twistedcaldav.memcacher import Memcacher
</del><span class="cx"> from twistedcaldav.timezones import TimezoneException
</span><span class="cx">
</span><span class="cx"> from txdav.base.propertystore.base import PropertyName
</span><span class="lines">@@ -64,14 +61,15 @@
</span><span class="cx"> from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
</span><span class="cx"> from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
</span><span class="cx"> from txdav.caldav.datastore.scheduling.utils import uidFromCalendarUserAddress
</span><del>-from txdav.caldav.datastore.util import AttachmentRetrievalTransport, \
- normalizationLookup
</del><ins>+from txdav.caldav.datastore.sql_attachment import Attachment, DropBoxAttachment, \
+ AttachmentLink, ManagedAttachment
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord, \
+ GroupShareeRecord
+from txdav.caldav.datastore.util import normalizationLookup
</ins><span class="cx"> from txdav.caldav.datastore.util import CalendarObjectBase
</span><del>-from txdav.caldav.datastore.util import StorageTransportBase
</del><span class="cx"> from txdav.caldav.datastore.util import dropboxIDFromCalendarObject
</span><span class="cx"> from txdav.caldav.icalendarstore import ICalendarHome, ICalendar, ICalendarObject, \
</span><del>- IAttachment, AttachmentStoreFailed, AttachmentStoreValidManagedID, \
- AttachmentMigrationFailed, AttachmentDropboxNotAllowed, \
</del><ins>+ AttachmentStoreFailed, AttachmentStoreValidManagedID, \
</ins><span class="cx"> TooManyAttendeesError, InvalidComponentTypeError, InvalidCalendarAccessError, \
</span><span class="cx"> ResourceDeletedError, \
</span><span class="cx"> AttendeeAllowedError, InvalidPerUserDataMerge, ComponentUpdateState, \
</span><span class="lines">@@ -79,15 +77,16 @@
</span><span class="cx"> InvalidDefaultCalendar, \
</span><span class="cx"> InvalidAttachmentOperation, DuplicatePrivateCommentsError, \
</span><span class="cx"> TimeRangeUpperLimit, TimeRangeLowerLimit, InvalidSplit, \
</span><del>- AttachmentSizeTooLarge, UnknownTimezone, SetComponentOptions
-from txdav.caldav.icalendarstore import QuotaExceeded
</del><ins>+ UnknownTimezone, SetComponentOptions
</ins><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><del>- CommonObjectResource, ECALENDARTYPE, SharingInvitation
</del><ins>+ CommonObjectResource, ECALENDARTYPE
+from txdav.common.datastore.sql_directory import GroupsRecord
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _ATTACHMENTS_MODE_NONE, \
</span><span class="cx"> _ATTACHMENTS_MODE_READ, _ATTACHMENTS_MODE_WRITE, _BIND_MODE_DIRECT, \
</span><span class="cx"> _BIND_MODE_GROUP, _BIND_MODE_GROUP_READ, _BIND_MODE_GROUP_WRITE, \
</span><span class="cx"> _BIND_MODE_OWN, _BIND_MODE_READ, _BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, \
</span><span class="cx"> _TRANSP_OPAQUE, _TRANSP_TRANSPARENT, schema
</span><ins>+from txdav.common.datastore.sql_sharing import SharingInvitation
</ins><span class="cx"> from txdav.common.icommondatastore import IndexedSearchException, \
</span><span class="cx"> InternalDataStoreError, HomeChildNameAlreadyExistsError, \
</span><span class="cx"> HomeChildNameNotAllowedError, ObjectResourceTooBigError, \
</span><span class="lines">@@ -111,8 +110,7 @@
</span><span class="cx"> from urlparse import urlparse, urlunparse
</span><span class="cx"> import collections
</span><span class="cx"> import datetime
</span><del>-import os
-import tempfile
</del><ins>+import itertools
</ins><span class="cx"> import urllib
</span><span class="cx"> import uuid
</span><span class="cx">
</span><span class="lines">@@ -142,7 +140,7 @@
</span><span class="cx"> @type txn: L{txdav.common.datastore.sql.CommonStoreTransaction}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- at = schema.ATTACHMENT
</del><ins>+ at = Attachment._attachmentSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> (at.DROPBOX_ID,),
</span><span class="cx"> From=at,
</span><span class="lines">@@ -174,8 +172,8 @@
</span><span class="cx"> txn = self._store.newTransaction("CalendarStoreFeatures.upgradeToManagedAttachments - preliminary work")
</span><span class="cx"> try:
</span><span class="cx"> # Clear out unused CALENDAR_OBJECT.DROPBOX_IDs
</span><del>- co = schema.CALENDAR_OBJECT
- at = schema.ATTACHMENT
</del><ins>+ co = CalendarObject._objectSchema
+ at = Attachment._attachmentSchema
</ins><span class="cx"> yield Update(
</span><span class="cx"> {co.DROPBOX_ID: None},
</span><span class="cx"> Where=co.RESOURCE_ID.In(Select(
</span><span class="lines">@@ -248,7 +246,7 @@
</span><span class="cx"> log.debug(" {0} affected calendar objects".format(len(cobjs),))
</span><span class="cx">
</span><span class="cx"> # Get names of each matching attachment
</span><del>- at = schema.ATTACHMENT
</del><ins>+ at = Attachment._attachmentSchema
</ins><span class="cx"> names = (yield Select(
</span><span class="cx"> (at.PATH,),
</span><span class="cx"> From=at,
</span><span class="lines">@@ -317,8 +315,8 @@
</span><span class="cx"> @type dropbox_id: C{str}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- co = schema.CALENDAR_OBJECT
- cb = schema.CALENDAR_BIND
</del><ins>+ co = CalendarObject._objectSchema
+ cb = Calendar._bindSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> (cb.CALENDAR_HOME_RESOURCE_ID, co.CALENDAR_RESOURCE_ID, co.RESOURCE_ID,),
</span><span class="cx"> From=co.join(cb, co.CALENDAR_RESOURCE_ID == cb.CALENDAR_RESOURCE_ID),
</span><span class="lines">@@ -404,6 +402,33 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><ins>+class CalendarHomeRecord(SerializableRecord, fromTable(schema.CALENDAR_HOME)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.CALENDAR_HOME}.
+ """
+ pass
+
+
+
+class CalendarMetaDataRecord(SerializableRecord, fromTable(schema.CALENDAR_METADATA)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.CALENDAR_METADATA}.
+ """
+ pass
+
+
+
+class CalendarBindRecord(SerializableRecord, fromTable(schema.CALENDAR_BIND)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.CALENDAR_BIND}.
+ """
+ pass
+
+
+
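# A minimal sketch (assumed caller context, not part of this changeset): the new
# record classes expose the generic L{Record} query API used elsewhere in this
# module (cf. GroupShareeRecord.querysimple below), so bind rows for a home can
# be loaded without hand-written SQL. "txn" and "homeID" are assumed to be
# supplied by the caller; attribute names mirror the column names.
@inlineCallbacks
def _bindRecordsForHome(txn, homeID):
    binds = yield CalendarBindRecord.querysimple(
        txn, calendarHomeResourceID=homeID
    )
    returnValue(binds)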
</ins><span class="cx"> class CalendarHome(CommonHome):
</span><span class="cx">
</span><span class="cx"> implements(ICalendarHome)
</span><span class="lines">@@ -412,16 +437,15 @@
</span><span class="cx">
</span><span class="cx"> # structured tables. (new, preferred)
</span><span class="cx"> _homeSchema = schema.CALENDAR_HOME
</span><del>- _bindSchema = schema.CALENDAR_BIND
</del><span class="cx"> _homeMetaDataSchema = schema.CALENDAR_HOME_METADATA
</span><ins>+
+ _bindSchema = schema.CALENDAR_BIND
</ins><span class="cx"> _revisionsSchema = schema.CALENDAR_OBJECT_REVISIONS
</span><span class="cx"> _objectSchema = schema.CALENDAR_OBJECT
</span><span class="cx">
</span><span class="cx"> _notifierPrefix = "CalDAV"
</span><span class="cx"> _dataVersionKey = "CALENDAR-DATAVERSION"
</span><span class="cx">
</span><del>- _cacher = Memcacher("SQL.calhome", pickle=True, key_normalization=False)
-
</del><span class="cx"> _componentCalendarName = {
</span><span class="cx"> "VEVENT": "calendar",
</span><span class="cx"> "VTODO": "tasks",
</span><span class="lines">@@ -494,37 +518,36 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def remove(self):
</span><del>- ch = schema.CALENDAR_HOME
- cb = schema.CALENDAR_BIND
- cor = schema.CALENDAR_OBJECT_REVISIONS
- rp = schema.RESOURCE_PROPERTY
-
</del><span class="cx"> # delete attachments corresponding to this home, also removing from disk
</span><span class="cx"> yield Attachment.removedHome(self._txn, self._resourceID)
</span><span class="cx">
</span><del>- yield Delete(
- From=cb,
- Where=cb.CALENDAR_HOME_RESOURCE_ID == self._resourceID
- ).on(self._txn)
</del><ins>+ yield super(CalendarHome, self).remove()
</ins><span class="cx">
</span><del>- yield Delete(
- From=cor,
- Where=cor.CALENDAR_HOME_RESOURCE_ID == self._resourceID
- ).on(self._txn)
</del><span class="cx">
</span><del>- yield Delete(
- From=ch,
- Where=ch.RESOURCE_ID == self._resourceID
- ).on(self._txn)
</del><ins>+ @inlineCallbacks
+ def copyMetadata(self, other, calendarIDMap):
+ """
+ Copy metadata from one L{CalendarHome} to another. This is only
+ used during a migration step.
+ """
</ins><span class="cx">
</span><del>- yield Delete(
- From=rp,
- Where=rp.RESOURCE_ID == self._resourceID
</del><ins>+ # Simple attributes that can be copied over as-is, but the calendar id's need to be mapped
+ chm = self._homeMetaDataSchema
+ values = {}
+ for attr, col in zip(self.metadataAttributes(), self.metadataColumns()):
+ value = getattr(other, attr)
+ if attr in self._componentDefaultAttribute.values():
+ value = calendarIDMap.get(value)
+ setattr(self, attr, value)
+ values[col] = value
+
+ # Update the local data
+ yield Update(
+ values,
+ Where=chm.RESOURCE_ID == self._resourceID
</ins><span class="cx"> ).on(self._txn)
</span><span class="cx">
</span><del>- yield self._cacher.delete(str(self._ownerUID))
</del><span class="cx">
</span><del>-
</del><span class="cx"> @inlineCallbacks
</span><span class="cx"> def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, mode):
</span><span class="cx"> """
</span><span class="lines">@@ -598,8 +621,8 @@
</span><span class="cx"> """
</span><span class="cx"> Implement lookup via queries.
</span><span class="cx"> """
</span><del>- co = schema.CALENDAR_OBJECT
- cb = schema.CALENDAR_BIND
</del><ins>+ co = self._objectSchema
+ cb = self._bindSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> [co.PARENT_RESOURCE_ID,
</span><span class="cx"> co.RESOURCE_ID],
</span><span class="lines">@@ -618,10 +641,34 @@
</span><span class="cx"> returnValue(None)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def getAllAttachments(self):
+ """
+ Return all the L{Attachment} objects associated with this calendar home.
+ Needed during migration.
+ """
+ return Attachment.loadAllAttachments(self)
+
+
+ def getAttachmentLinks(self):
+ """
+ Read the attachment<->calendar object mapping data associated with this calendar home.
+ Needed during migration only.
+ """
+ return AttachmentLink.linksForHome(self)
+
+
+ def getAttachmentByID(self, id):
+ """
+ Return a specific attachment associated with this calendar home.
+ Needed during migration only.
+ """
+ return Attachment.loadAttachmentByID(self, id)
+
+
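# A hypothetical migration-style sketch (not part of this changeset): the three
# helpers above allow all attachment state for a home to be read in one pass.
# "home" is assumed to be a CalendarHome obtained from a transaction.
@inlineCallbacks
def _readAttachmentState(home):
    attachments = yield home.getAllAttachments()
    links = yield home.getAttachmentLinks()
    returnValue((attachments, links,))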
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def getAllDropboxIDs(self):
</span><del>- co = schema.CALENDAR_OBJECT
- cb = schema.CALENDAR_BIND
</del><ins>+ co = self._objectSchema
+ cb = self._bindSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> [co.DROPBOX_ID],
</span><span class="cx"> From=co.join(cb, co.PARENT_RESOURCE_ID == cb.RESOURCE_ID),
</span><span class="lines">@@ -634,7 +681,7 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def getAllAttachmentNames(self):
</span><del>- att = schema.ATTACHMENT
</del><ins>+ att = Attachment._attachmentSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> [att.DROPBOX_ID],
</span><span class="cx"> From=att,
</span><span class="lines">@@ -646,8 +693,8 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def getAllManagedIDs(self):
</span><del>- at = schema.ATTACHMENT
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
</del><ins>+ at = Attachment._attachmentSchema
+ attco = Attachment._attachmentLinkSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> [attco.MANAGED_ID, ],
</span><span class="cx"> From=attco.join(at, attco.ATTACHMENT_ID == at.ATTACHMENT_ID),
</span><span class="lines">@@ -658,6 +705,27 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><ins>+ def getAllGroupAttendees(self):
+ """
+ Return a list of L{GroupAttendeeRecord}, L{GroupRecord} for each group attendee referenced in calendar data
+ owned by this home.
+ """
+
+ results = []
+ calendars = yield self.loadChildren()
+ for calendar in calendars:
+ if not calendar.owned():
+ continue
+ children = yield calendar.objectResources()
+ cobjs = [child.id() for child in children]
+ if cobjs:
+ result = yield GroupAttendeeRecord.groupAttendeesForObjects(self._txn, cobjs)
+ results.extend(result)
+
+ returnValue(results)
+
+
+ @inlineCallbacks
</ins><span class="cx"> def createdHome(self):
</span><span class="cx">
</span><span class="cx"> # Check whether components type must be separate
</span><span class="lines">@@ -960,6 +1028,12 @@
</span><span class="cx"> _objectSchema = schema.CALENDAR_OBJECT
</span><span class="cx"> _timeRangeSchema = schema.TIME_RANGE
</span><span class="cx">
</span><ins>+ _homeRecordClass = CalendarHomeRecord
+ _metadataRecordClass = CalendarMetaDataRecord
+ _bindRecordClass = CalendarBindRecord
+ _bindHomeIDAttributeName = "calendarHomeResourceID"
+ _bindResourceIDAttributeName = "calendarResourceID"
+
</ins><span class="cx"> # Mapping of iCalendar property name to DB column name
</span><span class="cx"> _queryFields = {
</span><span class="cx"> "UID": _objectSchema.UID,
</span><span class="lines">@@ -1054,6 +1128,46 @@
</span><span class="cx"> def _calendarHome(self):
</span><span class="cx"> return self._home
</span><span class="cx">
</span><ins>+
+ @inlineCallbacks
+ def copyMetadata(self, other):
+ """
+ Copy metadata from one L{Calendar} to another. This is only
+ used during a migration step.
+ """
+
+ # Copy over list of attributes and the name
+ self._name = other._name
+ for attr in itertools.chain(self.metadataAttributes(), self.additionalBindAttributes()):
+ if attr in ("_created", "_modified"):
+ continue
+ if hasattr(other, attr):
+ setattr(self, attr, getattr(other, attr))
+
+ # Update the metadata table
+ cm = self._homeChildMetaDataSchema
+ values = {}
+ for attr, column in itertools.izip(self.metadataAttributes(), self.metadataColumns()):
+ if attr in ("_created", "_modified"):
+ continue
+ values[column] = getattr(self, attr)
+ yield Update(
+ values,
+ Where=(cm.RESOURCE_ID == self._resourceID)
+ ).on(self._txn)
+
+ # Update the bind table
+ cb = self._bindSchema
+ values = {
+ cb.RESOURCE_NAME: self._name
+ }
+ for attr, column in itertools.izip(self.additionalBindAttributes(), self.additionalBindColumns()):
+ values[column] = getattr(self, attr)
+ yield Update(
+ values,
+ Where=(cb.CALENDAR_HOME_RESOURCE_ID == self.viewerHome()._resourceID).And(cb.CALENDAR_RESOURCE_ID == self._resourceID)
+ ).on(self._txn)
+
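# Sketch of how the two copyMetadata() variants are meant to combine during a
# migration (assumed caller context, not part of this changeset): calendar-level
# metadata is copied per calendar, then the home-level copy receives a map of
# old-to-new calendar ids so that default-calendar references can be remapped.
@inlineCallbacks
def _migrateMetadata(newHome, oldHome, calendarPairs):
    calendarIDMap = {}
    for newCalendar, oldCalendar in calendarPairs:
        yield newCalendar.copyMetadata(oldCalendar)
        calendarIDMap[oldCalendar.id()] = newCalendar.id()
    yield newHome.copyMetadata(oldHome, calendarIDMap)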
</ins><span class="cx"> ownerCalendarHome = CommonHomeChild.ownerHome
</span><span class="cx"> viewerCalendarHome = CommonHomeChild.viewerHome
</span><span class="cx"> calendarObjects = CommonHomeChild.objectResources
</span><span class="lines">@@ -1488,7 +1602,7 @@
</span><span class="cx"> """
</span><span class="cx"> Query to find resources that need to be re-expanded
</span><span class="cx"> """
</span><del>- co = schema.CALENDAR_OBJECT
</del><ins>+ co = cls._objectSchema
</ins><span class="cx"> return Select(
</span><span class="cx"> [co.RESOURCE_NAME],
</span><span class="cx"> From=co,
</span><span class="lines">@@ -1795,8 +1909,8 @@
</span><span class="cx">
</span><span class="cx"> # First check that the actual group membership has changed
</span><span class="cx"> if (yield self.updateShareeGroupLink(groupUID)):
</span><del>- groupID = (yield self._txn.groupByUID(groupUID))[0]
- memberUIDs = yield self._txn.groupMemberUIDs(groupID)
</del><ins>+ group = yield self._txn.groupByUID(groupUID)
+ memberUIDs = yield self._txn.groupMemberUIDs(group.groupID)
</ins><span class="cx"> boundUIDs = set()
</span><span class="cx">
</span><span class="cx"> home = self._homeSchema
</span><span class="lines">@@ -1946,39 +2060,36 @@
</span><span class="cx"> update schema.GROUP_SHAREE
</span><span class="cx"> """
</span><span class="cx"> changed = False
</span><del>- (
- groupID, _ignore_name, membershipHash, _ignore_modDate,
- _ignore_extant
- ) = yield self._txn.groupByUID(groupUID)
</del><ins>+ group = yield self._txn.groupByUID(groupUID)
</ins><span class="cx">
</span><span class="cx"> gs = schema.GROUP_SHAREE
</span><span class="cx"> rows = yield Select(
</span><span class="cx"> [gs.MEMBERSHIP_HASH, gs.GROUP_BIND_MODE],
</span><span class="cx"> From=gs,
</span><span class="cx"> Where=(gs.CALENDAR_ID == self._resourceID).And(
</span><del>- gs.GROUP_ID == groupID)
</del><ins>+ gs.GROUP_ID == group.groupID)
</ins><span class="cx"> ).on(self._txn)
</span><span class="cx"> if rows:
</span><span class="cx"> [[gsMembershipHash, gsMode]] = rows
</span><span class="cx"> updateMap = {}
</span><del>- if gsMembershipHash != membershipHash:
- updateMap[gs.MEMBERSHIP_HASH] = membershipHash
</del><ins>+ if gsMembershipHash != group.membershipHash:
+ updateMap[gs.MEMBERSHIP_HASH] = group.membershipHash
</ins><span class="cx"> if mode is not None and gsMode != mode:
</span><span class="cx"> updateMap[gs.GROUP_BIND_MODE] = mode
</span><span class="cx"> if updateMap:
</span><span class="cx"> yield Update(
</span><span class="cx"> updateMap,
</span><span class="cx"> Where=(gs.CALENDAR_ID == self._resourceID).And(
</span><del>- gs.GROUP_ID == groupID
</del><ins>+ gs.GROUP_ID == group.groupID
</ins><span class="cx"> )
</span><span class="cx"> ).on(self._txn)
</span><span class="cx"> changed = True
</span><span class="cx"> else:
</span><span class="cx"> yield Insert({
</span><del>- gs.MEMBERSHIP_HASH: membershipHash,
</del><ins>+ gs.MEMBERSHIP_HASH: group.membershipHash,
</ins><span class="cx"> gs.GROUP_BIND_MODE: mode,
</span><span class="cx"> gs.CALENDAR_ID: self._resourceID,
</span><del>- gs.GROUP_ID: groupID,
</del><ins>+ gs.GROUP_ID: group.groupID,
</ins><span class="cx"> }).on(self._txn)
</span><span class="cx"> changed = True
</span><span class="cx">
</span><span class="lines">@@ -2065,8 +2176,8 @@
</span><span class="cx">
</span><span class="cx"> # invite every member of group
</span><span class="cx"> shareeViews = []
</span><del>- groupID = (yield self._txn.groupByUID(shareeUID))[0]
- memberUIDs = yield self._txn.groupMemberUIDs(groupID)
</del><ins>+ group = yield self._txn.groupByUID(shareeUID)
+ memberUIDs = yield self._txn.groupMemberUIDs(group.groupID)
</ins><span class="cx"> for memberUID in memberUIDs:
</span><span class="cx"> if memberUID != self._home.uid():
</span><span class="cx"> shareeView = yield self.shareeView(memberUID)
</span><span class="lines">@@ -2206,6 +2317,14 @@
</span><span class="cx"> returnValue(invitations)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def groupSharees(self):
+ sharees = yield GroupShareeRecord.querysimple(self._txn, calendarID=self.id())
+ groups = set([sharee.groupID for sharee in sharees])
+ groups = (yield GroupsRecord.query(self._txn, GroupsRecord.groupID.In(groups))) if groups else []
+ returnValue({"groups": groups, "sharees": sharees})
+
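# Hypothetical consumer sketch (not part of this changeset): correlate each
# GROUP_SHAREE row with its GROUPS row via groupID. Attribute names are assumed
# to follow the usual column-name mapping.
@inlineCallbacks
def _groupShareeSummary(calendar):
    data = yield calendar.groupSharees()
    groupsByID = dict((group.groupID, group) for group in data["groups"])
    returnValue([(groupsByID.get(sharee.groupID), sharee) for sharee in data["sharees"]])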
+
</ins><span class="cx"> icalfbtype_to_indexfbtype = {
</span><span class="cx"> "UNKNOWN" : 0,
</span><span class="cx"> "FREE" : 1,
</span><span class="lines">@@ -2413,9 +2532,9 @@
</span><span class="cx"> groupRecord = yield self.directoryService().recordWithCalendarUserAddress(groupCUA)
</span><span class="cx"> if groupRecord:
</span><span class="cx"> # get members
</span><del>- groupID = (yield self._txn.groupByUID(groupRecord.uid))[0]
- if groupID is not None:
- members = yield self._txn.groupMembers(groupID)
</del><ins>+ group = yield self._txn.groupByUID(groupRecord.uid)
+ if group is not None:
+ members = yield self._txn.groupMembers(group.groupID)
</ins><span class="cx"> groupCUAToAttendeeMemberPropMap[groupRecord.canonicalCalendarUserAddress()] = tuple(
</span><span class="cx"> [member.attendeeProperty(params={"MEMBER": groupCUA}) for member in sorted(members, key=lambda x: x.uid)]
</span><span class="cx"> )
</span><span class="lines">@@ -2439,19 +2558,14 @@
</span><span class="cx"> @return: a L{dict} with group ids as the key and membership hash as the value
</span><span class="cx"> @rtype: L{dict}
</span><span class="cx"> """
</span><del>- ga = schema.GROUP_ATTENDEE
- rows = yield Select(
- [ga.GROUP_ID, ga.MEMBERSHIP_HASH],
- From=ga,
- Where=ga.RESOURCE_ID == self._resourceID,
- ).on(self._txn)
- returnValue(dict(rows))
</del><ins>+ records = yield GroupAttendeeRecord.querysimple(self._txn, resourceID=self._resourceID)
+ returnValue(dict([(record.groupID, record,) for record in records]))
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def updateEventGroupLink(self, groupCUAToAttendeeMemberPropMap=None):
</span><span class="cx"> """
</span><del>- update schema.GROUP_ATTENDEE
</del><ins>+ update group event links
</ins><span class="cx"> """
</span><span class="cx"> if groupCUAToAttendeeMemberPropMap is None:
</span><span class="cx"> if hasattr(self, "_groupCUAToAttendeeMemberPropMap"):
</span><span class="lines">@@ -2468,42 +2582,27 @@
</span><span class="cx"> groupUID = groupRecord.uid
</span><span class="cx"> else:
</span><span class="cx"> groupUID = uidFromCalendarUserAddress(groupCUA)
</span><del>- (
- groupID, _ignore_name, membershipHash, _ignore_modDate,
- _ignore_extant
- ) = yield self._txn.groupByUID(groupUID)
</del><ins>+ group = yield self._txn.groupByUID(groupUID)
</ins><span class="cx">
</span><del>- ga = schema.GROUP_ATTENDEE
- if groupID in groupIDToMembershipHashMap:
- if groupIDToMembershipHashMap[groupID] != membershipHash:
- yield Update(
- {ga.MEMBERSHIP_HASH: membershipHash, },
- Where=(ga.RESOURCE_ID == self._resourceID).And(
- ga.GROUP_ID == groupID)
- ).on(self._txn)
</del><ins>+ if group.groupID in groupIDToMembershipHashMap:
+ if groupIDToMembershipHashMap[group.groupID].membershipHash != group.membershipHash:
+ yield groupIDToMembershipHashMap[group.groupID].update(membershipHash=group.membershipHash)
</ins><span class="cx"> changed = True
</span><del>- del groupIDToMembershipHashMap[groupID]
</del><ins>+ del groupIDToMembershipHashMap[group.groupID]
</ins><span class="cx"> else:
</span><del>- yield Insert({
- ga.RESOURCE_ID: self._resourceID,
- ga.GROUP_ID: groupID,
- ga.MEMBERSHIP_HASH: membershipHash,
- }).on(self._txn)
</del><ins>+ yield GroupAttendeeRecord.create(
+ self._txn,
+ resourceID=self._resourceID,
+ groupID=group.groupID,
+ membershipHash=group.membershipHash,
+ )
</ins><span class="cx"> changed = True
</span><span class="cx">
</span><span class="cx"> if groupIDToMembershipHashMap:
</span><del>- groupIDsToRemove = groupIDToMembershipHashMap.keys()
- yield Delete(
- From=ga,
- Where=(ga.RESOURCE_ID == self._resourceID).And(
- ga.GROUP_ID.In(
- Parameter(
- "groupIDsToRemove",
- len(groupIDsToRemove)
- )
- )
- )
- ).on(self._txn, groupIDsToRemove=groupIDsToRemove)
</del><ins>+ yield GroupAttendeeRecord.deletesome(
+ self._txn,
+ GroupAttendeeRecord.groupID.In(groupIDToMembershipHashMap.keys()),
+ )
</ins><span class="cx"> changed = True
</span><span class="cx">
</span><span class="cx"> returnValue(changed)
</span><span class="lines">@@ -2564,11 +2663,7 @@
</span><span class="cx"> del self._groupCUAToAttendeeMemberPropMap
</span><span class="cx"> else:
</span><span class="cx"> # delete existing group rows
</span><del>- ga = schema.GROUP_ATTENDEE
- yield Delete(
- From=ga,
- Where=ga.RESOURCE_ID == self._resourceID,
- ).on(txn)
</del><ins>+ yield GroupAttendeeRecord.deletesimple(self._txn, resourceID=self._resourceID)
</ins><span class="cx">
</span><span class="cx"> returnValue(isOldEventWithGroupAttendees)
</span><span class="cx">
</span><span class="lines">@@ -2614,13 +2709,11 @@
</span><span class="cx"> # remove group link to ensure update (update to unknown hash would work too)
</span><span class="cx"> # FIXME: its possible that more than one group id gets updated during this single work item, so we
</span><span class="cx"> # need to make sure that ALL the group_id's are removed by this query.
</span><del>- ga = schema.GROUP_ATTENDEE
- yield Delete(
- From=ga,
- Where=(ga.RESOURCE_ID == self._resourceID).And(
- ga.GROUP_ID == groupID
- )
- ).on(self._txn)
</del><ins>+ yield GroupAttendeeRecord.deletesimple(
+ self._txn,
+ resourceID=self._resourceID,
+ groupID=groupID,
+ )
</ins><span class="cx">
</span><span class="cx"> # update group attendee in remaining component
</span><span class="cx"> component = yield self.componentForUser()
</span><span class="lines">@@ -3521,7 +3614,7 @@
</span><span class="cx"> recurrenceLowerLimit = None
</span><span class="cx"> recurrenceLimit = DateTime(1900, 1, 1, 0, 0, 0, tzid=Timezone(utc=True))
</span><span class="cx">
</span><del>- co = schema.CALENDAR_OBJECT
</del><ins>+ co = self._objectSchema
</ins><span class="cx"> tr = schema.TIME_RANGE
</span><span class="cx">
</span><span class="cx"> # Do not update if reCreate (re-indexing - we don't want to re-write data
</span><span class="lines">@@ -3725,6 +3818,29 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><ins>+ def copyMetadata(self, other):
+ """
+ Copy metadata from one L{CalendarObjectResource} to another. This is only
+ used during a migration step.
+ """
+ co = self._objectSchema
+ values = {
+ co.ATTACHMENTS_MODE : other._attachment,
+ co.DROPBOX_ID : other._dropboxID,
+ co.ACCESS : other._access,
+ co.SCHEDULE_OBJECT : other._schedule_object,
+ co.SCHEDULE_TAG : other._schedule_tag,
+ co.SCHEDULE_ETAGS : other._schedule_etags,
+ co.PRIVATE_COMMENTS : other._private_comments,
+ }
+
+ yield Update(
+ values,
+ Where=co.RESOURCE_ID == self._resourceID
+ ).on(self._txn)
+
+
+ @inlineCallbacks
</ins><span class="cx"> def component(self, doUpdate=False):
</span><span class="cx"> """
</span><span class="cx"> Read calendar data and validate/fix it. Do not raise a store error here
</span><span class="lines">@@ -3890,6 +4006,15 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def purge(self):
+ """
+ Do a "silent" removal of this object resource.
+ """
+ return self._removeInternal(
+ ComponentRemoveState.NORMAL_NO_IMPLICIT
+ )
+
+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _removeInternal(self, internal_state=ComponentRemoveState.NORMAL):
</span><span class="cx">
</span><span class="lines">@@ -3947,7 +4072,7 @@
</span><span class="cx"> """
</span><span class="cx"> DAL query to load RECURRANCE_MIN, RECURRANCE_MAX via an object's resource ID.
</span><span class="cx"> """
</span><del>- co = schema.CALENDAR_OBJECT
</del><ins>+ co = cls._objectSchema
</ins><span class="cx"> return Select(
</span><span class="cx"> [co.RECURRANCE_MIN, co.RECURRANCE_MAX, ],
</span><span class="cx"> From=co,
</span><span class="lines">@@ -4482,8 +4607,8 @@
</span><span class="cx"> Get a list of managed attachments where the names returned are for the last path segment
</span><span class="cx"> of the attachment URI.
</span><span class="cx"> """
</span><del>- at = schema.ATTACHMENT
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
</del><ins>+ at = Attachment._attachmentSchema
+ attco = Attachment._attachmentLinkSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> [attco.MANAGED_ID, at.PATH, ],
</span><span class="cx"> From=attco.join(at, attco.ATTACHMENT_ID == at.ATTACHMENT_ID),
</span><span class="lines">@@ -4499,8 +4624,8 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> # Scan all the associated attachments for the one that matches
</span><del>- at = schema.ATTACHMENT
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
</del><ins>+ at = Attachment._attachmentSchema
+ attco = Attachment._attachmentLinkSchema
</ins><span class="cx"> rows = (yield Select(
</span><span class="cx"> [attco.MANAGED_ID, at.PATH, ],
</span><span class="cx"> From=attco.join(at, attco.ATTACHMENT_ID == at.ATTACHMENT_ID),
</span><span class="lines">@@ -4553,8 +4678,10 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def attachments(self):
</span><span class="cx"> if self._dropboxID:
</span><del>- rows = yield self._attachmentsQuery.on(self._txn,
- dropboxID=self._dropboxID)
</del><ins>+ rows = yield self._attachmentsQuery.on(
+ self._txn,
+ dropboxID=self._dropboxID,
+ )
</ins><span class="cx"> result = []
</span><span class="cx"> for row in rows:
</span><span class="cx"> result.append((yield self.attachmentWithName(row[0])))
</span><span class="lines">@@ -4823,900 +4950,6 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><del>-class AttachmentStorageTransport(StorageTransportBase):
-
- _TEMPORARY_UPLOADS_DIRECTORY = "Temporary"
-
- def __init__(self, attachment, contentType, dispositionName, creating=False):
- super(AttachmentStorageTransport, self).__init__(
- attachment, contentType, dispositionName)
-
- fileDescriptor, fileName = self._temporaryFile()
- # Wrap the file descriptor in a file object we can write to
- self._file = os.fdopen(fileDescriptor, "w")
- self._path = CachingFilePath(fileName)
- self._hash = hashlib.md5()
- self._creating = creating
-
- self._txn.postAbort(self.aborted)
-
-
- def _temporaryFile(self):
- """
- Returns a (file descriptor, absolute path) tuple for a temporary file within
- the Attachments/Temporary directory (creating the Temporary subdirectory
- if it doesn't exist). It is the caller's responsibility to remove the
- file.
- """
- attachmentRoot = self._txn._store.attachmentsPath
- tempUploadsPath = attachmentRoot.child(self._TEMPORARY_UPLOADS_DIRECTORY)
- if not tempUploadsPath.exists():
- tempUploadsPath.createDirectory()
- return tempfile.mkstemp(dir=tempUploadsPath.path)
-
-
- @property
- def _txn(self):
- return self._attachment._txn
-
-
- def aborted(self):
- """
- Transaction aborted - clean up temp files.
- """
- if self._path.exists():
- self._path.remove()
-
-
- def write(self, data):
- if isinstance(data, buffer):
- data = str(data)
- self._file.write(data)
- self._hash.update(data)
-
-
- @inlineCallbacks
- def loseConnection(self):
-
- # FIXME: this should be synchronously accessible; IAttachment should
- # have a method for getting its parent just as CalendarObject/Calendar
- # do.
-
- # FIXME: If this method isn't called, the transaction should be
- # prevented from committing successfully. It's not valid to have an
- # attachment that doesn't point to a real file.
-
- home = (yield self._txn.calendarHomeWithResourceID(self._attachment._ownerHomeID))
-
- oldSize = self._attachment.size()
- newSize = self._file.tell()
- self._file.close()
-
- # Check max size for attachment
- if newSize > config.MaximumAttachmentSize:
- self._path.remove()
- if self._creating:
- yield self._attachment._internalRemove()
- raise AttachmentSizeTooLarge()
-
- # Check overall user quota
- allowed = home.quotaAllowedBytes()
- if allowed is not None and allowed < ((yield home.quotaUsedBytes())
- + (newSize - oldSize)):
- self._path.remove()
- if self._creating:
- yield self._attachment._internalRemove()
- raise QuotaExceeded()
-
- self._path.moveTo(self._attachment._path)
-
- yield self._attachment.changed(
- self._contentType,
- self._dispositionName,
- self._hash.hexdigest(),
- newSize
- )
-
- if home:
- # Adjust quota
- yield home.adjustQuotaUsedBytes(self._attachment.size() - oldSize)
-
- # Send change notification to home
- yield home.notifyChanged()
-
-
-
-def sqltime(value):
- return datetimeMktime(parseSQLTimestamp(value))
-
-
-
-class Attachment(object):
-
- implements(IAttachment)
-
- def __init__(self, txn, a_id, dropboxID, name, ownerHomeID=None, justCreated=False):
- self._txn = txn
- self._attachmentID = a_id
- self._ownerHomeID = ownerHomeID
- self._dropboxID = dropboxID
- self._contentType = None
- self._size = 0
- self._md5 = None
- self._created = None
- self._modified = None
- self._name = name
- self._justCreated = justCreated
-
-
- def __repr__(self):
- return (
- "<{self.__class__.__name__}: {self._attachmentID}>"
- .format(self=self)
- )
-
-
- def _attachmentPathRoot(self):
- return self._txn._store.attachmentsPath
-
-
- @inlineCallbacks
- def initFromStore(self):
- """
- Execute necessary SQL queries to retrieve attributes.
-
- @return: C{True} if this attachment exists, C{False} otherwise.
- """
- att = schema.ATTACHMENT
- if self._dropboxID:
- where = (att.DROPBOX_ID == self._dropboxID).And(
- att.PATH == self._name)
- else:
- where = (att.ATTACHMENT_ID == self._attachmentID)
- rows = (yield Select(
- [
- att.ATTACHMENT_ID,
- att.DROPBOX_ID,
- att.CALENDAR_HOME_RESOURCE_ID,
- att.CONTENT_TYPE,
- att.SIZE,
- att.MD5,
- att.CREATED,
- att.MODIFIED,
- att.PATH,
- ],
- From=att,
- Where=where
- ).on(self._txn))
-
- if not rows:
- returnValue(None)
-
- row_iter = iter(rows[0])
- self._attachmentID = row_iter.next()
- self._dropboxID = row_iter.next()
- self._ownerHomeID = row_iter.next()
- self._contentType = MimeType.fromString(row_iter.next())
- self._size = row_iter.next()
- self._md5 = row_iter.next()
- self._created = sqltime(row_iter.next())
- self._modified = sqltime(row_iter.next())
- self._name = row_iter.next()
-
- returnValue(self)
-
-
- def dropboxID(self):
- return self._dropboxID
-
-
- def isManaged(self):
- return self._dropboxID == "."
-
-
- def name(self):
- return self._name
-
-
- def properties(self):
- pass # stub
-
-
- def store(self, contentType, dispositionName=None):
- if not self._name:
- self._name = dispositionName
- return AttachmentStorageTransport(self, contentType, dispositionName, self._justCreated)
-
-
- def retrieve(self, protocol):
- return AttachmentRetrievalTransport(self._path).start(protocol)
-
-
- def changed(self, contentType, dispositionName, md5, size):
- raise NotImplementedError
-
- _removeStatement = Delete(
- From=schema.ATTACHMENT,
- Where=(schema.ATTACHMENT.ATTACHMENT_ID == Parameter("attachmentID"))
- )
-
-
- @inlineCallbacks
- def remove(self):
- oldSize = self._size
- self._txn.postCommit(self.removePaths)
- yield self._internalRemove()
- # Adjust quota
- home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
- if home:
- yield home.adjustQuotaUsedBytes(-oldSize)
-
- # Send change notification to home
- yield home.notifyChanged()
-
-
- def removePaths(self):
- """
- Remove the actual file and up to attachment parent directory if empty.
- """
- self._path.remove()
- self.removeParentPaths()
-
-
- def removeParentPaths(self):
- """
- Remove up to attachment parent directory if empty.
- """
- parent = self._path.parent()
- toppath = self._attachmentPathRoot().path
- while parent.path != toppath:
- if len(parent.listdir()) == 0:
- parent.remove()
- parent = parent.parent()
- else:
- break
-
-
- def _internalRemove(self):
- """
- Just delete the row; don't do any accounting / bookkeeping. (This is
- for attachments that have failed to be created due to errors during
- storage.)
- """
- return self._removeStatement.on(self._txn, attachmentID=self._attachmentID)
-
-
- @classmethod
- @inlineCallbacks
- def removedHome(cls, txn, homeID):
- """
- A calendar home is being removed so all of its attachments must go too. When removing,
- we don't care about quota adjustment as there will be no quota once the home is removed.
-
- TODO: this needs to be transactional wrt the actual file deletes.
- """
- att = schema.ATTACHMENT
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
-
- rows = (yield Select(
- [att.ATTACHMENT_ID, att.DROPBOX_ID, ],
- From=att,
- Where=(
- att.CALENDAR_HOME_RESOURCE_ID == homeID
- ),
- ).on(txn))
-
- for attachmentID, dropboxID in rows:
- if dropboxID:
- attachment = DropBoxAttachment(txn, attachmentID, None, None)
- else:
- attachment = ManagedAttachment(txn, attachmentID, None, None)
- attachment = (yield attachment.initFromStore())
- if attachment._path.exists():
- attachment.removePaths()
-
- yield Delete(
- From=attco,
- Where=(
- attco.ATTACHMENT_ID.In(Select(
- [att.ATTACHMENT_ID, ],
- From=att,
- Where=(
- att.CALENDAR_HOME_RESOURCE_ID == homeID
- ),
- ))
- ),
- ).on(txn)
-
- yield Delete(
- From=att,
- Where=(
- att.CALENDAR_HOME_RESOURCE_ID == homeID
- ),
- ).on(txn)
-
-
- # IDataStoreObject
- def contentType(self):
- return self._contentType
-
-
- def md5(self):
- return self._md5
-
-
- def size(self):
- return self._size
-
-
- def created(self):
- return self._created
-
-
- def modified(self):
- return self._modified
-
-
-
-class DropBoxAttachment(Attachment):
-
- @classmethod
- @inlineCallbacks
- def create(cls, txn, dropboxID, name, ownerHomeID):
- """
- Create a new Attachment object.
-
- @param txn: The transaction to use
- @type txn: L{CommonStoreTransaction}
- @param dropboxID: the identifier for the attachment (dropbox id or managed id)
- @type dropboxID: C{str}
- @param name: the name of the attachment
- @type name: C{str}
- @param ownerHomeID: the resource-id of the home collection of the attachment owner
- @type ownerHomeID: C{int}
- """
-
- # If store has already migrated to managed attachments we will prevent creation of dropbox attachments
- dropbox = (yield txn.store().dropboxAllowed(txn))
- if not dropbox:
- raise AttachmentDropboxNotAllowed
-
- # Now create the DB entry
- att = schema.ATTACHMENT
- rows = (yield Insert({
- att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
- att.DROPBOX_ID : dropboxID,
- att.CONTENT_TYPE : "",
- att.SIZE : 0,
- att.MD5 : "",
- att.PATH : name,
- }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
-
- row_iter = iter(rows[0])
- a_id = row_iter.next()
- created = sqltime(row_iter.next())
- modified = sqltime(row_iter.next())
-
- attachment = cls(txn, a_id, dropboxID, name, ownerHomeID, True)
- attachment._created = created
- attachment._modified = modified
-
- # File system paths need to exist
- try:
- attachment._path.parent().makedirs()
- except:
- pass
-
- returnValue(attachment)
-
-
- @classmethod
- @inlineCallbacks
- def load(cls, txn, dropboxID, name):
- attachment = cls(txn, None, dropboxID, name)
- attachment = (yield attachment.initFromStore())
- returnValue(attachment)
-
-
- @property
- def _path(self):
- # Use directory hashing scheme based on MD5 of dropboxID
- hasheduid = hashlib.md5(self._dropboxID).hexdigest()
- attachmentRoot = self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
- return attachmentRoot.child(self.name())
-
-
- @classmethod
- @inlineCallbacks
- def resourceRemoved(cls, txn, resourceID, dropboxID):
- """
- Remove all attachments referencing the specified resource.
- """
-
- # See if any other resources still reference this dropbox ID
- co = schema.CALENDAR_OBJECT
- rows = (yield Select(
- [co.RESOURCE_ID, ],
- From=co,
- Where=(co.DROPBOX_ID == dropboxID).And(
- co.RESOURCE_ID != resourceID)
- ).on(txn))
-
- if not rows:
- # Find each attachment with matching dropbox ID
- att = schema.ATTACHMENT
- rows = (yield Select(
- [att.PATH],
- From=att,
- Where=(att.DROPBOX_ID == dropboxID)
- ).on(txn))
- for name in rows:
- name = name[0]
- attachment = yield cls.load(txn, dropboxID, name)
- yield attachment.remove()
-
-
- @inlineCallbacks
- def changed(self, contentType, dispositionName, md5, size):
- """
- Dropbox attachments never change their path - ignore dispositionName.
- """
-
- self._contentType = contentType
- self._md5 = md5
- self._size = size
-
- att = schema.ATTACHMENT
- self._created, self._modified = map(
- sqltime,
- (yield Update(
- {
- att.CONTENT_TYPE : generateContentType(self._contentType),
- att.SIZE : self._size,
- att.MD5 : self._md5,
- att.MODIFIED : utcNowSQL,
- },
- Where=(att.ATTACHMENT_ID == self._attachmentID),
- Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
- )
-
-
- @inlineCallbacks
- def convertToManaged(self):
- """
- Convert this dropbox attachment into a managed attachment by updating the
- database and returning a new ManagedAttachment object that does not reference
- any calendar object. Referencing will be added later.
-
- @return: the managed attachment object
- @rtype: L{ManagedAttachment}
- """
-
- # Change the DROPBOX_ID to a single "." to indicate a managed attachment.
- att = schema.ATTACHMENT
- (yield Update(
- {att.DROPBOX_ID : ".", },
- Where=(att.ATTACHMENT_ID == self._attachmentID),
- ).on(self._txn))
-
- # Create an "orphaned" ManagedAttachment that points to the updated data but without
- # an actual managed-id (which only exists when there is a reference to a calendar object).
- mattach = (yield ManagedAttachment.load(self._txn, None, None, attachmentID=self._attachmentID))
- mattach._managedID = str(uuid.uuid4())
- if mattach is None:
- raise AttachmentMigrationFailed
-
- # Then move the file on disk from the old path to the new one
- try:
- mattach._path.parent().makedirs()
- except Exception:
- # OK to fail if it already exists, otherwise must raise
- if not mattach._path.parent().exists():
- raise
- oldpath = self._path
- newpath = mattach._path
- oldpath.moveTo(newpath)
- self.removeParentPaths()
-
- returnValue(mattach)
-
-
-
-class ManagedAttachment(Attachment):
- """
- Managed attachments are ones that the server is in total control of. Clients do POSTs on calendar objects
- to store the attachment data and have ATTACH properties added, updated, or removed from the calendar objects.
- Each ATTACH property in a calendar object has a MANAGED-ID iCalendar parameter that is used in the POST requests
- to target a specific attachment. The MANAGED-ID values are unique to each calendar object resource, though
- multiple calendar object resources can point to the same underlying attachment as there is a separate database
- table that maps calendar objects/managed-ids to actual attachments.
- """
-
- @classmethod
- @inlineCallbacks
- def _create(cls, txn, managedID, ownerHomeID):
- """
- Create a new managed Attachment object.
-
- @param txn: The transaction to use
- @type txn: L{CommonStoreTransaction}
- @param managedID: the identifier for the attachment
- @type managedID: C{str}
- @param ownerHomeID: the resource-id of the home collection of the attachment owner
- @type ownerHomeID: C{int}
- """
-
- # Now create the DB entry
- att = schema.ATTACHMENT
- rows = (yield Insert({
- att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
- att.DROPBOX_ID : ".",
- att.CONTENT_TYPE : "",
- att.SIZE : 0,
- att.MD5 : "",
- att.PATH : "",
- }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
-
- row_iter = iter(rows[0])
- a_id = row_iter.next()
- created = sqltime(row_iter.next())
- modified = sqltime(row_iter.next())
-
- attachment = cls(txn, a_id, ".", None, ownerHomeID, True)
- attachment._managedID = managedID
- attachment._created = created
- attachment._modified = modified
-
- # File system paths need to exist
- try:
- attachment._path.parent().makedirs()
- except:
- pass
-
- returnValue(attachment)
-
-
- @classmethod
- @inlineCallbacks
- def create(cls, txn, managedID, ownerHomeID, referencedBy):
- """
- Create a new Attachment object.
-
- @param txn: The transaction to use
- @type txn: L{CommonStoreTransaction}
- @param managedID: the identifier for the attachment
- @type managedID: C{str}
- @param ownerHomeID: the resource-id of the home collection of the attachment owner
- @type ownerHomeID: C{int}
- @param referencedBy: the resource-id of the calendar object referencing the attachment
- @type referencedBy: C{int}
- """
-
- # Now create the DB entry
- attachment = (yield cls._create(txn, managedID, ownerHomeID))
- attachment._objectResourceID = referencedBy
-
- # Create the attachment<->calendar object relationship for managed attachments
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- yield Insert({
- attco.ATTACHMENT_ID : attachment._attachmentID,
- attco.MANAGED_ID : attachment._managedID,
- attco.CALENDAR_OBJECT_RESOURCE_ID : attachment._objectResourceID,
- }).on(txn)
-
- returnValue(attachment)
-
-
- @classmethod
- @inlineCallbacks
- def update(cls, txn, oldManagedID, ownerHomeID, referencedBy, oldAttachmentID):
- """
- Create a new Attachment object.
-
- @param txn: The transaction to use
- @type txn: L{CommonStoreTransaction}
- @param oldManagedID: the identifier for the original attachment
- @type oldManagedID: C{str}
- @param ownerHomeID: the resource-id of the home collection of the attachment owner
- @type ownerHomeID: C{int}
- @param referencedBy: the resource-id of the calendar object referencing the attachment
- @type referencedBy: C{int}
- @param oldAttachmentID: the attachment-id of the existing attachment being updated
- @type oldAttachmentID: C{int}
- """
-
- # Now create the DB entry with a new managed-ID
- managed_id = str(uuid.uuid4())
- attachment = (yield cls._create(txn, managed_id, ownerHomeID))
- attachment._objectResourceID = referencedBy
-
- # Update the attachment<->calendar object relationship for managed attachments
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- yield Update(
- {
- attco.ATTACHMENT_ID : attachment._attachmentID,
- attco.MANAGED_ID : attachment._managedID,
- },
- Where=(attco.MANAGED_ID == oldManagedID).And(
- attco.CALENDAR_OBJECT_RESOURCE_ID == attachment._objectResourceID
- ),
- ).on(txn)
-
- # Now check whether old attachmentID is still referenced - if not delete it
- rows = (yield Select(
- [attco.ATTACHMENT_ID, ],
- From=attco,
- Where=(attco.ATTACHMENT_ID == oldAttachmentID),
- ).on(txn))
- aids = [row[0] for row in rows] if rows is not None else ()
- if len(aids) == 0:
- oldattachment = ManagedAttachment(txn, oldAttachmentID, None, None)
- oldattachment = (yield oldattachment.initFromStore())
- yield oldattachment.remove()
-
- returnValue(attachment)
-
-
- @classmethod
- @inlineCallbacks
- def load(cls, txn, referencedID, managedID, attachmentID=None):
- """
- Load a ManagedAttachment via either its managedID or attachmentID.
- """
-
- if managedID:
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- where = (attco.MANAGED_ID == managedID)
- if referencedID is not None:
- where = where.And(attco.CALENDAR_OBJECT_RESOURCE_ID == referencedID)
- rows = (yield Select(
- [attco.ATTACHMENT_ID, ],
- From=attco,
- Where=where,
- ).on(txn))
- if len(rows) == 0:
- returnValue(None)
- elif referencedID is not None and len(rows) != 1:
- raise AttachmentStoreValidManagedID
- attachmentID = rows[0][0]
-
- attachment = cls(txn, attachmentID, None, None)
- attachment = (yield attachment.initFromStore())
- attachment._managedID = managedID
- attachment._objectResourceID = referencedID
- returnValue(attachment)
-
-
- @classmethod
- @inlineCallbacks
- def referencesTo(cls, txn, managedID):
- """
- Find all the calendar object resourceIds referenced by this supplied managed-id.
- """
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- rows = (yield Select(
- [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
- From=attco,
- Where=(attco.MANAGED_ID == managedID),
- ).on(txn))
- cobjs = set([row[0] for row in rows]) if rows is not None else set()
- returnValue(cobjs)
-
-
- @classmethod
- @inlineCallbacks
- def usedManagedID(cls, txn, managedID):
- """
- Return the "owner" home and referencing resource is, and UID for a managed-id.
- """
- att = schema.ATTACHMENT
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- co = schema.CALENDAR_OBJECT
- rows = (yield Select(
- [
- att.CALENDAR_HOME_RESOURCE_ID,
- attco.CALENDAR_OBJECT_RESOURCE_ID,
- co.ICALENDAR_UID,
- ],
- From=att.join(
- attco, att.ATTACHMENT_ID == attco.ATTACHMENT_ID, "left outer"
- ).join(co, co.RESOURCE_ID == attco.CALENDAR_OBJECT_RESOURCE_ID),
- Where=(attco.MANAGED_ID == managedID),
- ).on(txn))
- returnValue(rows)
-
-
- @classmethod
- @inlineCallbacks
- def resourceRemoved(cls, txn, resourceID):
- """
- Remove all attachments referencing the specified resource.
- """
-
- # Find all reference attachment-ids and dereference
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- rows = (yield Select(
- [attco.MANAGED_ID, ],
- From=attco,
- Where=(attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
- ).on(txn))
- mids = set([row[0] for row in rows]) if rows is not None else set()
- for managedID in mids:
- attachment = (yield ManagedAttachment.load(txn, resourceID, managedID))
- (yield attachment.removeFromResource(resourceID))
-
-
- @classmethod
- @inlineCallbacks
- def copyManagedID(cls, txn, managedID, referencedBy):
- """
- Associate an existing attachment with the new resource.
- """
-
- # Find the associated attachment-id and insert new reference
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- aid = (yield Select(
- [attco.ATTACHMENT_ID, ],
- From=attco,
- Where=(attco.MANAGED_ID == managedID),
- ).on(txn))[0][0]
-
- yield Insert({
- attco.ATTACHMENT_ID : aid,
- attco.MANAGED_ID : managedID,
- attco.CALENDAR_OBJECT_RESOURCE_ID : referencedBy,
- }).on(txn)
-
-
- def managedID(self):
- return self._managedID
-
-
- @inlineCallbacks
- def objectResource(self):
- """
- Return the calendar object resource associated with this attachment.
- """
-
- home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
- obj = (yield home.objectResourceWithID(self._objectResourceID))
- returnValue(obj)
-
-
- @property
- def _path(self):
- # Use directory hashing scheme based on MD5 of attachmentID
- hasheduid = hashlib.md5(str(self._attachmentID)).hexdigest()
- return self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
-
-
- @inlineCallbacks
- def location(self):
- """
- Return the URI location of the attachment.
- """
- if not hasattr(self, "_ownerName"):
- home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
- self._ownerName = home.name()
- if not hasattr(self, "_objectDropboxID"):
- if not hasattr(self, "_objectResource"):
- self._objectResource = (yield self.objectResource())
- self._objectDropboxID = self._objectResource._dropboxID
-
- fname = self.lastSegmentOfUriPath(self._managedID, self._name)
- location = self._txn._store.attachmentsURIPattern % {
- "home": self._ownerName,
- "dropbox_id": urllib.quote(self._objectDropboxID),
- "name": urllib.quote(fname),
- }
- returnValue(location)
-
-
- @classmethod
- def lastSegmentOfUriPath(cls, managed_id, name):
- splits = name.rsplit(".", 1)
- fname = splits[0]
- suffix = splits[1] if len(splits) == 2 else "unknown"
- return "{0}-{1}.{2}".format(fname, managed_id[:8], suffix)
-
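# Worked example for lastSegmentOfUriPath (illustrative values only):
#   ManagedAttachment.lastSegmentOfUriPath("3f0c9ae4-77a8-4b52-9d0f-0c8e0a9d1b2c", "photo.jpg")
#   returns "photo-3f0c9ae4.jpg"; a name with no extension, e.g. "notes",
#   would become "notes-3f0c9ae4.unknown".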
-
- @inlineCallbacks
- def changed(self, contentType, dispositionName, md5, size):
- """
- Always update name to current disposition name.
- """
-
- self._contentType = contentType
- self._name = dispositionName
- self._md5 = md5
- self._size = size
- att = schema.ATTACHMENT
- self._created, self._modified = map(
- sqltime,
- (yield Update(
- {
- att.CONTENT_TYPE : generateContentType(self._contentType),
- att.SIZE : self._size,
- att.MD5 : self._md5,
- att.MODIFIED : utcNowSQL,
- att.PATH : self._name,
- },
- Where=(att.ATTACHMENT_ID == self._attachmentID),
- Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
- )
-
-
- @inlineCallbacks
- def newReference(self, resourceID):
- """
- Create a new reference of this attachment to the supplied calendar object resource id, and
- return a ManagedAttachment for the new reference.
-
- @param resourceID: the resource id to reference
- @type resourceID: C{int}
-
- @return: the new managed attachment
- @rtype: L{ManagedAttachment}
- """
-
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- yield Insert({
- attco.ATTACHMENT_ID : self._attachmentID,
- attco.MANAGED_ID : self._managedID,
- attco.CALENDAR_OBJECT_RESOURCE_ID : resourceID,
- }).on(self._txn)
-
- mattach = (yield ManagedAttachment.load(self._txn, resourceID, self._managedID))
- returnValue(mattach)
-
-
- @inlineCallbacks
- def removeFromResource(self, resourceID):
-
- # Delete the reference
- attco = schema.ATTACHMENT_CALENDAR_OBJECT
- yield Delete(
- From=attco,
- Where=(attco.ATTACHMENT_ID == self._attachmentID).And(
- attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
- ).on(self._txn)
-
- # Check whether any references still exist - if not, remove the actual attachment
- rows = (yield Select(
- [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
- From=attco,
- Where=(attco.ATTACHMENT_ID == self._attachmentID),
- ).on(self._txn))
- if len(rows) == 0:
- yield self.remove()
-
-
- @inlineCallbacks
- def attachProperty(self):
- """
- Return an iCalendar ATTACH property for this attachment.
- """
- attach = Property("ATTACH", "", valuetype=Value.VALUETYPE_URI)
- location = (yield self.updateProperty(attach))
- returnValue((attach, location,))
-
-
- @inlineCallbacks
- def updateProperty(self, attach):
- """
- Update an iCalendar ATTACH property for this attachment.
- """
-
- location = (yield self.location())
-
- attach.setParameter("MANAGED-ID", self.managedID())
- attach.setParameter("FMTTYPE", "{0}/{1}".format(self.contentType().mediaType, self.contentType().mediaSubtype))
- attach.setParameter("FILENAME", self.name())
- attach.setParameter("SIZE", str(self.size()))
- attach.setValue(location)
-
- returnValue(location)
-
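# A minimal usage sketch (assumed caller context, not part of this changeset):
# attachProperty() yields a ready-made ATTACH property plus its location, which
# the caller can add to a component. The serialized property looks roughly like
#   ATTACH;MANAGED-ID=3f0c9ae4;FMTTYPE=image/jpeg;FILENAME=photo.jpg;SIZE=2048:
#    https://example.com/calendars/.../dropbox/XYZ.dropbox/photo-3f0c9ae4.jpg
# (the exact URI depends on the store's attachmentsURIPattern).
@inlineCallbacks
def _addAttachProperty(component, attachment):
    attach, _location = yield attachment.attachProperty()
    component.addProperty(attach)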
</del><span class="cx"> # Hook-up class relationships at the end after they have all been defined
</span><span class="cx"> from txdav.caldav.datastore.sql_external import CalendarHomeExternal, CalendarExternal, CalendarObjectExternal
</span><span class="cx"> CalendarHome._externalClass = CalendarHomeExternal
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoresql_attachmentpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcaldavdatastoresql_attachmentpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_attachment.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py         (rev 0)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql_attachment.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1205 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from pycalendar.value import Value
+
+from twext.enterprise.dal.syntax import Select, Insert, Delete, Parameter, \
+ Update, utcNowSQL
+from twext.enterprise.util import parseSQLTimestamp
+from twext.python.filepath import CachingFilePath
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from twistedcaldav.config import config
+from twistedcaldav.dateops import datetimeMktime
+from twistedcaldav.ical import Property
+
+from txdav.caldav.datastore.util import StorageTransportBase, \
+ AttachmentRetrievalTransport
+from txdav.caldav.icalendarstore import AttachmentSizeTooLarge, QuotaExceeded, \
+ IAttachment, AttachmentDropboxNotAllowed, AttachmentMigrationFailed, \
+ AttachmentStoreValidManagedID
+from txdav.common.datastore.sql_tables import schema
+
+from txweb2.http_headers import MimeType, generateContentType
+
+from zope.interface.declarations import implements
+
+import hashlib
+import itertools
+import os
+import tempfile
+import urllib
+import uuid
+
+"""
+Classes and methods that relate to CalDAV attachments in the SQL store.
+"""
+
+
+class AttachmentStorageTransport(StorageTransportBase):
+
+ _TEMPORARY_UPLOADS_DIRECTORY = "Temporary"
+
+ def __init__(self, attachment, contentType, dispositionName, creating=False, migrating=False):
+ super(AttachmentStorageTransport, self).__init__(
+ attachment, contentType, dispositionName)
+
+ fileDescriptor, fileName = self._temporaryFile()
+ # Wrap the file descriptor in a file object we can write to
+ self._file = os.fdopen(fileDescriptor, "w")
+ self._path = CachingFilePath(fileName)
+ self._hash = hashlib.md5()
+ self._creating = creating
+ self._migrating = migrating
+
+ self._txn.postAbort(self.aborted)
+
+
+ def _temporaryFile(self):
+ """
+ Returns a (file descriptor, absolute path) tuple for a temporary file within
+ the Attachments/Temporary directory (creating the Temporary subdirectory
+ if it doesn't exist). It is the caller's responsibility to remove the
+ file.
+ """
+ attachmentRoot = self._txn._store.attachmentsPath
+ tempUploadsPath = attachmentRoot.child(self._TEMPORARY_UPLOADS_DIRECTORY)
+ if not tempUploadsPath.exists():
+ tempUploadsPath.createDirectory()
+ return tempfile.mkstemp(dir=tempUploadsPath.path)
+
+
+ @property
+ def _txn(self):
+ return self._attachment._txn
+
+
+ def aborted(self):
+ """
+ Transaction aborted - clean up temp files.
+ """
+ if self._path.exists():
+ self._path.remove()
+
+
+ def write(self, data):
+ if isinstance(data, buffer):
+ data = str(data)
+ self._file.write(data)
+ self._hash.update(data)
+
+
+ @inlineCallbacks
+ def loseConnection(self):
+ """
+ Note that when self._migrating is set we only care about the data and don't need to
+ do any quota checks/adjustments.
+ """
+
+ # FIXME: this should be synchronously accessible; IAttachment should
+ # have a method for getting its parent just as CalendarObject/Calendar
+ # do.
+
+ # FIXME: If this method isn't called, the transaction should be
+ # prevented from committing successfully. It's not valid to have an
+ # attachment that doesn't point to a real file.
+
+ home = (yield self._txn.calendarHomeWithResourceID(self._attachment._ownerHomeID))
+
+ oldSize = self._attachment.size()
+ newSize = self._file.tell()
+ self._file.close()
+
+ # Check max size for attachment
+ if not self._migrating and newSize > config.MaximumAttachmentSize:
+ self._path.remove()
+ if self._creating:
+ yield self._attachment._internalRemove()
+ raise AttachmentSizeTooLarge()
+
+ # Check overall user quota
+ if not self._migrating:
+ allowed = home.quotaAllowedBytes()
+ if allowed is not None and allowed < ((yield home.quotaUsedBytes())
+ + (newSize - oldSize)):
+ self._path.remove()
+ if self._creating:
+ yield self._attachment._internalRemove()
+ raise QuotaExceeded()
+
+ self._path.moveTo(self._attachment._path)
+
+ yield self._attachment.changed(
+ self._contentType,
+ self._dispositionName,
+ self._hash.hexdigest(),
+ newSize
+ )
+
+ if not self._migrating and home:
+ # Adjust quota
+ yield home.adjustQuotaUsedBytes(self._attachment.size() - oldSize)
+
+ # Send change notification to home
+ yield home.notifyChanged()
+
+
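# A rough usage sketch (assumed caller context, not part of this changeset): the
# transport is normally obtained via Attachment.store(); data is written and the
# upload is finalized by loseConnection(), which performs the size and quota
# checks above unless the transport was created with migrating=True.
@inlineCallbacks
def _storeAttachmentData(attachment, data):
    transport = attachment.store(MimeType("image", "jpeg"), "photo.jpg")
    transport.write(data)
    yield transport.loseConnection()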
+
+def sqltime(value):
+ return datetimeMktime(parseSQLTimestamp(value))
+
+
+
+class AttachmentLink(object):
+ """
+ A binding between an L{Attachment} and an L{CalendarObject}.
+ """
+
+ _attachmentSchema = schema.ATTACHMENT
+ _attachmentLinkSchema = schema.ATTACHMENT_CALENDAR_OBJECT
+
+ @classmethod
+ def makeClass(cls, txn, linkData):
+ """
+ Given the various database rows, build the actual class.
+
+ @param linkData: the standard set of link columns
+ @type linkData: C{list}
+
+ @return: the constructed L{AttachmentLink}
+ @rtype: L{AttachmentLink}
+ """
+
+ child = cls(txn)
+ for attr, value in zip(child._rowAttributes(), linkData):
+ setattr(child, attr, value)
+ return child
+
+
+ @classmethod
+ def _allColumns(cls):
+ """
+ Full set of columns in the object table that need to be loaded to
+ initialize the object resource state.
+ """
+ aco = cls._attachmentLinkSchema
+ return [
+ aco.ATTACHMENT_ID,
+ aco.MANAGED_ID,
+ aco.CALENDAR_OBJECT_RESOURCE_ID,
+ ]
+
+
+ @classmethod
+ def _rowAttributes(cls):
+ """
+ Object attributes used to store the column values from L{_allColumns}. This is used to create
+ a mapping when serializing the object for cross-pod requests.
+ """
+ return (
+ "_attachmentID",
+ "_managedID",
+ "_calendarObjectID",
+ )
+
+
+ @classmethod
+ @inlineCallbacks
+ def linksForHome(cls, home):
+ """
+ Load all attachment<->calendar object mappings for the specified home collection.
+ """
+
+ # Load from the main table first
+ att = cls._attachmentSchema
+ attco = cls._attachmentLinkSchema
+ dataRows = yield Select(
+ cls._allColumns(),
+ From=attco.join(att, on=(attco.ATTACHMENT_ID == att.ATTACHMENT_ID)),
+ Where=att.CALENDAR_HOME_RESOURCE_ID == home.id(),
+ ).on(home._txn)
+
+ # Create the actual objects
+ returnValue([cls.makeClass(home._txn, row) for row in dataRows])
+
+
+ def __init__(self, txn):
+ self._txn = txn
+ for attr in self._rowAttributes():
+ setattr(self, attr, None)
+
+
+ def serialize(self):
+ """
+ Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
+ and reconstituted at the other end. Note that the other end may have a different schema so
+ the attributes may not match exactly and will need to be processed accordingly.
+ """
+ return dict([(attr[1:], getattr(self, attr, None)) for attr in self._rowAttributes()])
+
+
+ @classmethod
+ def deserialize(cls, txn, mapping):
+ """
+ Given a mapping generated by L{serialize}, convert the values into an array of database-like
+ items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
+ Note that there may be a schema mismatch with the external data, so treat missing items as
+ C{None} and ignore extra items.
+ """
+
+ return cls.makeClass(txn, [mapping.get(row[1:]) for row in cls._rowAttributes()])
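+
+ # A minimal cross-pod round-trip sketch (hypothetical values; in practice the
+ # mapping arrives via a conduit request):
+ #
+ #   link = AttachmentLink.makeClass(txn, [10, "some-managed-id", 42])
+ #   mapping = link.serialize()   # {"attachmentID": 10, "managedID": ..., "calendarObjectID": 42}
+ #   copy = AttachmentLink.deserialize(txn, mapping)
+ #   yield copy.insert()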
+
+
+ def insert(self):
+ """
+ Insert the object.
+ """
+
+ row = dict([(column, getattr(self, attr)) for column, attr in itertools.izip(self._allColumns(), self._rowAttributes())])
+ return Insert(row).on(self._txn)
+
+
+
+class Attachment(object):
+
+ implements(IAttachment)
+
+ _attachmentSchema = schema.ATTACHMENT
+ _attachmentLinkSchema = schema.ATTACHMENT_CALENDAR_OBJECT
+
+ @classmethod
+ def makeClass(cls, txn, attachmentData):
+ """
+ Given the various database rows, build the actual class.
+
+ @param attachmentData: the standard set of attachment columns
+ @type attachmentData: C{list}
+
+ @return: the constructed child class
+ @rtype: L{Attachment}
+ """
+
+ att = cls._attachmentSchema
+ dropbox_id = attachmentData[cls._allColumns().index(att.DROPBOX_ID)]
+ c = ManagedAttachment if dropbox_id == "." else DropBoxAttachment
+ child = c(
+ txn,
+ attachmentData[cls._allColumns().index(att.ATTACHMENT_ID)],
+ attachmentData[cls._allColumns().index(att.DROPBOX_ID)],
+ attachmentData[cls._allColumns().index(att.PATH)],
+ )
+
+ for attr, value in zip(child._rowAttributes(), attachmentData):
+ setattr(child, attr, value)
+ child._contentType = MimeType.fromString(child._contentType)
+
+ return child
+
+
+ @classmethod
+ def _allColumns(cls):
+ """
+ Full set of columns in the object table that need to be loaded to
+ initialize the object resource state.
+ """
+ att = cls._attachmentSchema
+ return [
+ att.ATTACHMENT_ID,
+ att.DROPBOX_ID,
+ att.CALENDAR_HOME_RESOURCE_ID,
+ att.CONTENT_TYPE,
+ att.SIZE,
+ att.MD5,
+ att.CREATED,
+ att.MODIFIED,
+ att.PATH,
+ ]
+
+
+ @classmethod
+ def _rowAttributes(cls):
+ """
+ Object attributes used to store the column values from L{_allColumns}. This is used to create
+ a mapping when serializing the object for cross-pod requests.
+ """
+ return (
+ "_attachmentID",
+ "_dropboxID",
+ "_ownerHomeID",
+ "_contentType",
+ "_size",
+ "_md5",
+ "_created",
+ "_modified",
+ "_name",
+ )
+
+
+ @classmethod
+ @inlineCallbacks
+ def loadAllAttachments(cls, home):
+ """
+ Load all attachments assigned to the specified home collection. This should only be
+ used when sync'ing an entire home's set of attachments.
+ """
+
+ results = []
+
+ # Load from the main table first
+ att = cls._attachmentSchema
+ dataRows = yield Select(
+ cls._allColumns(),
+ From=att,
+ Where=att.CALENDAR_HOME_RESOURCE_ID == home.id(),
+ ).on(home._txn)
+
+ # Create the actual objects
+ for row in dataRows:
+ child = cls.makeClass(home._txn, row)
+ results.append(child)
+
+ returnValue(results)
+
+
+ @classmethod
+ @inlineCallbacks
+ def loadAttachmentByID(cls, home, id):
+ """
+ Load one attachment, by id, assigned to the specified home collection. This should only be
+ used when sync'ing an entire home's set of attachments.
+ """
+
+ # Load from the main table first
+ att = cls._attachmentSchema
+ rows = yield Select(
+ cls._allColumns(),
+ From=att,
+ Where=(att.CALENDAR_HOME_RESOURCE_ID == home.id()).And(
+ att.ATTACHMENT_ID == id),
+ ).on(home._txn)
+
+ # Create the actual object
+ returnValue(cls.makeClass(home._txn, rows[0]) if len(rows) == 1 else None)
+
+
+ def serialize(self):
+ """
+ Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
+ and reconstituted at the other end. Note that the other end may have a different schema so
+ the attributes may not match exactly and will need to be processed accordingly.
+ """
+ result = dict([(attr[1:], getattr(self, attr, None)) for attr in self._rowAttributes()])
+ result["contentType"] = generateContentType(result["contentType"])
+ return result
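+
+ # Serialization sketch (hypothetical values): the MimeType is flattened to a
+ # plain string so the mapping can travel over the cross-pod conduit, e.g.
+ #   {"attachmentID": 10, "dropboxID": ".", "ownerHomeID": 5,
+ #    "contentType": "image/jpeg", "size": 1024, "md5": "...", "name": "photo.jpg", ...}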
+
+
+ @classmethod
+ def deserialize(cls, txn, mapping):
+ """
+ Given a mapping generated by L{serialize}, convert the values into an array of database-like
+ items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
+ Note that there may be a schema mismatch with the external data, so treat missing items as
+ C{None} and ignore extra items.
+ """
+
+ return cls.makeClass(txn, [mapping.get(row[1:]) for row in cls._rowAttributes()])
+
+
+ def __init__(self, txn, a_id, dropboxID, name, ownerHomeID=None, justCreated=False):
+ self._txn = txn
+ self._attachmentID = a_id
+ self._ownerHomeID = ownerHomeID
+ self._dropboxID = dropboxID
+ self._contentType = None
+ self._size = 0
+ self._md5 = None
+ self._created = None
+ self._modified = None
+ self._name = name
+ self._justCreated = justCreated
+
+
+ def __repr__(self):
+ return (
+ "<{self.__class__.__name__}: {self._attachmentID}>"
+ .format(self=self)
+ )
+
+
+ def _attachmentPathRoot(self):
+ return self._txn._store.attachmentsPath
+
+
+ @inlineCallbacks
+ def initFromStore(self):
+ """
+ Execute necessary SQL queries to retrieve attributes.
+
+ @return: C{True} if this attachment exists, C{False} otherwise.
+ """
+ att = self._attachmentSchema
+ if self._dropboxID and self._dropboxID != ".":
+ where = (att.DROPBOX_ID == self._dropboxID).And(
+ att.PATH == self._name)
+ else:
+ where = (att.ATTACHMENT_ID == self._attachmentID)
+ rows = (yield Select(
+ self._allColumns(),
+ From=att,
+ Where=where
+ ).on(self._txn))
+
+ if not rows:
+ returnValue(None)
+
+ for attr, value in zip(self._rowAttributes(), rows[0]):
+ setattr(self, attr, value)
+ self._contentType = MimeType.fromString(self._contentType)
+ self._created = sqltime(self._created)
+ self._modified = sqltime(self._modified)
+
+ returnValue(self)
+
+
+ def copyRemote(self, remote):
+ """
+ Copy properties from a remote (external) attachment that is being migrated.
+
+ @param remote: the external attachment
+ @type remote: L{Attachment}
+ """
+ return self.changed(remote.contentType(), remote.name(), remote.md5(), remote.size())
+
+
+ def id(self):
+ return self._attachmentID
+
+
+ def dropboxID(self):
+ return self._dropboxID
+
+
+ def isManaged(self):
+ return self._dropboxID == "."
+
+
+ def name(self):
+ return self._name
+
+
+ def properties(self):
+ pass # stub
+
+
+ def store(self, contentType, dispositionName=None, migrating=False):
+ if not self._name:
+ self._name = dispositionName
+ return AttachmentStorageTransport(self, contentType, dispositionName, self._justCreated, migrating=migrating)
+
+
+ def retrieve(self, protocol):
+ return AttachmentRetrievalTransport(self._path).start(protocol)
+
+
+ def changed(self, contentType, dispositionName, md5, size):
+ raise NotImplementedError
+
+ _removeStatement = Delete(
+ From=schema.ATTACHMENT,
+ Where=(schema.ATTACHMENT.ATTACHMENT_ID == Parameter("attachmentID"))
+ )
+
+
+ @inlineCallbacks
+ def remove(self, adjustQuota=True):
+ oldSize = self._size
+ self._txn.postCommit(self.removePaths)
+ yield self._internalRemove()
+
+ # Adjust quota
+ if adjustQuota:
+ home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+ if home:
+ yield home.adjustQuotaUsedBytes(-oldSize)
+
+ # Send change notification to home
+ yield home.notifyChanged()
+
+
+ def removePaths(self):
+ """
+ Remove the actual file, then remove any empty parent directories up to the attachment root.
+ """
+ self._path.remove()
+ self.removeParentPaths()
+
+
+ def removeParentPaths(self):
+ """
+ Remove any empty parent directories, walking up to (but not including) the attachment root directory.
+ """
+ parent = self._path.parent()
+ toppath = self._attachmentPathRoot().path
+ while parent.path != toppath:
+ if len(parent.listdir()) == 0:
+ parent.remove()
+ parent = parent.parent()
+ else:
+ break
+
+
+ def _internalRemove(self):
+ """
+ Just delete the row; don't do any accounting / bookkeeping. (This is
+ for attachments that have failed to be created due to errors during
+ storage.)
+ """
+ return self._removeStatement.on(self._txn, attachmentID=self._attachmentID)
+
+
+ @classmethod
+ @inlineCallbacks
+ def removedHome(cls, txn, homeID):
+ """
+ A calendar home is being removed so all of its attachments must go too. When removing,
+ we don't care about quota adjustment as there will be no quota once the home is removed.
+
+ TODO: this needs to be transactional wrt the actual file deletes.
+ """
+ att = cls._attachmentSchema
+ attco = cls._attachmentLinkSchema
+
+ rows = (yield Select(
+ [att.ATTACHMENT_ID, att.DROPBOX_ID, ],
+ From=att,
+ Where=(
+ att.CALENDAR_HOME_RESOURCE_ID == homeID
+ ),
+ ).on(txn))
+
+ for attachmentID, dropboxID in rows:
+ if dropboxID != ".":
+ attachment = DropBoxAttachment(txn, attachmentID, None, None)
+ else:
+ attachment = ManagedAttachment(txn, attachmentID, None, None)
+ attachment = (yield attachment.initFromStore())
+ if attachment._path.exists():
+ attachment.removePaths()
+
+ yield Delete(
+ From=attco,
+ Where=(
+ attco.ATTACHMENT_ID.In(Select(
+ [att.ATTACHMENT_ID, ],
+ From=att,
+ Where=(
+ att.CALENDAR_HOME_RESOURCE_ID == homeID
+ ),
+ ))
+ ),
+ ).on(txn)
+
+ yield Delete(
+ From=att,
+ Where=(
+ att.CALENDAR_HOME_RESOURCE_ID == homeID
+ ),
+ ).on(txn)
+
+
+ # IDataStoreObject
+ def contentType(self):
+ return self._contentType
+
+
+ def md5(self):
+ return self._md5
+
+
+ def size(self):
+ return self._size
+
+
+ def created(self):
+ return self._created
+
+
+ def modified(self):
+ return self._modified
+
+
+
+class DropBoxAttachment(Attachment):
+
+ @classmethod
+ @inlineCallbacks
+ def create(cls, txn, dropboxID, name, ownerHomeID):
+ """
+ Create a new Attachment object.
+
+ @param txn: The transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param dropboxID: the identifier for the attachment (dropbox id or managed id)
+ @type dropboxID: C{str}
+ @param name: the name of the attachment
+ @type name: C{str}
+ @param ownerHomeID: the resource-id of the home collection of the attachment owner
+ @type ownerHomeID: C{int}
+ """
+
+ # If store has already migrated to managed attachments we will prevent creation of dropbox attachments
+ dropbox = (yield txn.store().dropboxAllowed(txn))
+ if not dropbox:
+ raise AttachmentDropboxNotAllowed
+
+ # Now create the DB entry
+ att = cls._attachmentSchema
+ rows = (yield Insert({
+ att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
+ att.DROPBOX_ID : dropboxID,
+ att.CONTENT_TYPE : "",
+ att.SIZE : 0,
+ att.MD5 : "",
+ att.PATH : name,
+ }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
+
+ row_iter = iter(rows[0])
+ a_id = row_iter.next()
+ created = sqltime(row_iter.next())
+ modified = sqltime(row_iter.next())
+
+ attachment = cls(txn, a_id, dropboxID, name, ownerHomeID, True)
+ attachment._created = created
+ attachment._modified = modified
+
+ # File system paths need to exist
+ try:
+ attachment._path.parent().makedirs()
+ except:
+ pass
+
+ returnValue(attachment)
+
+
+ @classmethod
+ @inlineCallbacks
+ def load(cls, txn, dropboxID, name):
+ attachment = cls(txn, None, dropboxID, name)
+ attachment = (yield attachment.initFromStore())
+ returnValue(attachment)
+
+
+ @property
+ def _path(self):
+ # Use directory hashing scheme based on MD5 of dropboxID
+ hasheduid = hashlib.md5(self._dropboxID).hexdigest()
+ attachmentRoot = self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
+ return attachmentRoot.child(self.name())
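+
+ # Illustrative only (hypothetical digest): if the dropbox id hashed to
+ # "d41d8cd98f00b204e9800998ecf8427e", the attachment file would live at
+ #   &lt;attachmentsPath&gt;/d4/1d/d41d8cd98f00b204e9800998ecf8427e/&lt;attachment name&gt;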
+
+
+ @classmethod
+ @inlineCallbacks
+ def resourceRemoved(cls, txn, resourceID, dropboxID):
+ """
+ Remove all attachments referencing the specified resource.
+ """
+
+ # See if any other resources still reference this dropbox ID
+ co = schema.CALENDAR_OBJECT
+ rows = (yield Select(
+ [co.RESOURCE_ID, ],
+ From=co,
+ Where=(co.DROPBOX_ID == dropboxID).And(
+ co.RESOURCE_ID != resourceID)
+ ).on(txn))
+
+ if not rows:
+ # Find each attachment with matching dropbox ID
+ att = cls._attachmentSchema
+ rows = (yield Select(
+ [att.PATH],
+ From=att,
+ Where=(att.DROPBOX_ID == dropboxID)
+ ).on(txn))
+ for name in rows:
+ name = name[0]
+ attachment = yield cls.load(txn, dropboxID, name)
+ yield attachment.remove()
+
+
+ @inlineCallbacks
+ def changed(self, contentType, dispositionName, md5, size):
+ """
+ Dropbox attachments never change their path - ignore dispositionName.
+ """
+
+ self._contentType = contentType
+ self._md5 = md5
+ self._size = size
+
+ att = self._attachmentSchema
+ self._created, self._modified = map(
+ sqltime,
+ (yield Update(
+ {
+ att.CONTENT_TYPE : generateContentType(self._contentType),
+ att.SIZE : self._size,
+ att.MD5 : self._md5,
+ att.MODIFIED : utcNowSQL,
+ },
+ Where=(att.ATTACHMENT_ID == self._attachmentID),
+ Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
+ )
+
+
+ @inlineCallbacks
+ def convertToManaged(self):
+ """
+ Convert this dropbox attachment into a managed attachment by updating the
+ database and returning a new ManagedAttachment object that does not reference
+ any calendar object. Referencing will be added later.
+
+ @return: the managed attachment object
+ @rtype: L{ManagedAttachment}
+ """
+
+ # Change the DROPBOX_ID to a single "." to indicate a managed attachment.
+ att = self._attachmentSchema
+ (yield Update(
+ {att.DROPBOX_ID : ".", },
+ Where=(att.ATTACHMENT_ID == self._attachmentID),
+ ).on(self._txn))
+
+ # Create an "orphaned" ManagedAttachment that points to the updated data but without
+ # an actual managed-id (which only exists when there is a reference to a calendar object).
+ mattach = (yield ManagedAttachment.load(self._txn, None, None, attachmentID=self._attachmentID))
+ if mattach is None:
+ raise AttachmentMigrationFailed
+ mattach._managedID = str(uuid.uuid4())
+
+ # Then move the file on disk from the old path to the new one
+ try:
+ mattach._path.parent().makedirs()
+ except Exception:
+ # OK to fail if it already exists, otherwise must raise
+ if not mattach._path.parent().exists():
+ raise
+ oldpath = self._path
+ newpath = mattach._path
+ oldpath.moveTo(newpath)
+ self.removeParentPaths()
+
+ returnValue(mattach)
+
+
+
+class ManagedAttachment(Attachment):
+ """
+ Managed attachments are ones that the server is in total control of. Clients do POSTs on calendar objects
+ to store the attachment data and have ATTACH properties added, updated or removed from the calendar objects.
+ Each ATTACH property in a calendar object has a MANAGED-ID iCalendar parameter that is used in the POST requests
+ to target a specific attachment. The MANAGED-ID values are unique to each calendar object resource, though
+ multiple calendar object resources can point to the same underlying attachment as there is a separate database
+ table that maps calendar objects/managed-ids to actual attachments.
+ """
+
+ @classmethod
+ @inlineCallbacks
+ def _create(cls, txn, managedID, ownerHomeID):
+ """
+ Create a new managed Attachment object.
+
+ @param txn: The transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param managedID: the identifier for the attachment
+ @type managedID: C{str}
+ @param ownerHomeID: the resource-id of the home collection of the attachment owner
+ @type ownerHomeID: C{int}
+ """
+
+ # Now create the DB entry
+ att = cls._attachmentSchema
+ rows = (yield Insert({
+ att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
+ att.DROPBOX_ID : ".",
+ att.CONTENT_TYPE : "",
+ att.SIZE : 0,
+ att.MD5 : "",
+ att.PATH : "",
+ }, Return=(att.ATTACHMENT_ID, att.CREATED, att.MODIFIED)).on(txn))
+
+ row_iter = iter(rows[0])
+ a_id = row_iter.next()
+ created = sqltime(row_iter.next())
+ modified = sqltime(row_iter.next())
+
+ attachment = cls(txn, a_id, ".", None, ownerHomeID, True)
+ attachment._managedID = managedID
+ attachment._created = created
+ attachment._modified = modified
+
+ # File system paths need to exist
+ try:
+ attachment._path.parent().makedirs()
+ except:
+ pass
+
+ returnValue(attachment)
+
+
+ @classmethod
+ @inlineCallbacks
+ def create(cls, txn, managedID, ownerHomeID, referencedBy):
+ """
+ Create a new Attachment object and reference it.
+
+ @param txn: The transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param managedID: the identifier for the attachment
+ @type managedID: C{str}
+ @param ownerHomeID: the resource-id of the home collection of the attachment owner
+ @type ownerHomeID: C{int}
+ @param referencedBy: the resource-id of the calendar object referencing the attachment
+ @type referencedBy: C{int}
+ """
+
+ # Now create the DB entry
+ attachment = (yield cls._create(txn, managedID, ownerHomeID))
+ attachment._objectResourceID = referencedBy
+
+ # Create the attachment<->calendar object relationship for managed attachments
+ attco = cls._attachmentLinkSchema
+ yield Insert({
+ attco.ATTACHMENT_ID : attachment._attachmentID,
+ attco.MANAGED_ID : attachment._managedID,
+ attco.CALENDAR_OBJECT_RESOURCE_ID : attachment._objectResourceID,
+ }).on(txn)
+
+ returnValue(attachment)
+
+
+ @classmethod
+ @inlineCallbacks
+ def update(cls, txn, oldManagedID, ownerHomeID, referencedBy, oldAttachmentID):
+ """
+ Update an Attachment object. This creates a new one and adjusts the reference to the old
+ one to point to the new one. If the old one is no longer referenced at all, it is deleted.
+
+ @param txn: The transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param oldManagedID: the identifier for the original attachment
+ @type oldManagedID: C{str}
+ @param ownerHomeID: the resource-id of the home collection of the attachment owner
+ @type ownerHomeID: C{int}
+ @param referencedBy: the resource-id of the calendar object referencing the attachment
+ @type referencedBy: C{int}
+ @param oldAttachmentID: the attachment-id of the existing attachment being updated
+ @type oldAttachmentID: C{int}
+ """
+
+ # Now create the DB entry with a new managed-ID
+ managed_id = str(uuid.uuid4())
+ attachment = (yield cls._create(txn, managed_id, ownerHomeID))
+ attachment._objectResourceID = referencedBy
+
+ # Update the attachment<->calendar object relationship for managed attachments
+ attco = cls._attachmentLinkSchema
+ yield Update(
+ {
+ attco.ATTACHMENT_ID : attachment._attachmentID,
+ attco.MANAGED_ID : attachment._managedID,
+ },
+ Where=(attco.MANAGED_ID == oldManagedID).And(
+ attco.CALENDAR_OBJECT_RESOURCE_ID == attachment._objectResourceID
+ ),
+ ).on(txn)
+
+ # Now check whether old attachmentID is still referenced - if not delete it
+ rows = (yield Select(
+ [attco.ATTACHMENT_ID, ],
+ From=attco,
+ Where=(attco.ATTACHMENT_ID == oldAttachmentID),
+ ).on(txn))
+ aids = [row[0] for row in rows] if rows is not None else ()
+ if len(aids) == 0:
+ oldattachment = ManagedAttachment(txn, oldAttachmentID, None, None)
+ oldattachment = (yield oldattachment.initFromStore())
+ yield oldattachment.remove()
+
+ returnValue(attachment)
+
+
+ @classmethod
+ @inlineCallbacks
+ def load(cls, txn, referencedID, managedID, attachmentID=None):
+ """
+ Load a ManagedAttachment via either its managedID or attachmentID.
+ """
+
+ if managedID:
+ attco = cls._attachmentLinkSchema
+ where = (attco.MANAGED_ID == managedID)
+ if referencedID is not None:
+ where = where.And(attco.CALENDAR_OBJECT_RESOURCE_ID == referencedID)
+ rows = (yield Select(
+ [attco.ATTACHMENT_ID, ],
+ From=attco,
+ Where=where,
+ ).on(txn))
+ if len(rows) == 0:
+ returnValue(None)
+ elif referencedID is not None and len(rows) != 1:
+ raise AttachmentStoreValidManagedID
+ attachmentID = rows[0][0]
+
+ attachment = cls(txn, attachmentID, None, None)
+ attachment = (yield attachment.initFromStore())
+ attachment._managedID = managedID
+ attachment._objectResourceID = referencedID
+ returnValue(attachment)
+
+
+ @classmethod
+ @inlineCallbacks
+ def referencesTo(cls, txn, managedID):
+ """
+ Find all the calendar object resourceIDs referenced by the supplied managed-id.
+ """
+ attco = cls._attachmentLinkSchema
+ rows = (yield Select(
+ [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
+ From=attco,
+ Where=(attco.MANAGED_ID == managedID),
+ ).on(txn))
+ cobjs = set([row[0] for row in rows]) if rows is not None else set()
+ returnValue(cobjs)
+
+
+ @classmethod
+ @inlineCallbacks
+ def usedManagedID(cls, txn, managedID):
+ """
+ Return the "owner" home and referencing resource is, and UID for a managed-id.
+ """
+ att = cls._attachmentSchema
+ attco = cls._attachmentLinkSchema
+ co = schema.CALENDAR_OBJECT
+ rows = (yield Select(
+ [
+ att.CALENDAR_HOME_RESOURCE_ID,
+ attco.CALENDAR_OBJECT_RESOURCE_ID,
+ co.ICALENDAR_UID,
+ ],
+ From=att.join(
+ attco, att.ATTACHMENT_ID == attco.ATTACHMENT_ID, "left outer"
+ ).join(co, co.RESOURCE_ID == attco.CALENDAR_OBJECT_RESOURCE_ID),
+ Where=(attco.MANAGED_ID == managedID),
+ ).on(txn))
+ returnValue(rows)
+
+
+ @classmethod
+ @inlineCallbacks
+ def resourceRemoved(cls, txn, resourceID):
+ """
+ Remove all attachments referencing the specified resource.
+ """
+
+ # Find all reference attachment-ids and dereference
+ attco = cls._attachmentLinkSchema
+ rows = (yield Select(
+ [attco.MANAGED_ID, ],
+ From=attco,
+ Where=(attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
+ ).on(txn))
+ mids = set([row[0] for row in rows]) if rows is not None else set()
+ for managedID in mids:
+ attachment = (yield ManagedAttachment.load(txn, resourceID, managedID))
+ (yield attachment.removeFromResource(resourceID))
+
+
+ @classmethod
+ @inlineCallbacks
+ def copyManagedID(cls, txn, managedID, referencedBy):
+ """
+ Associate an existing attachment with the new resource.
+ """
+
+ # Find the associated attachment-id and insert new reference
+ attco = cls._attachmentLinkSchema
+ aid = (yield Select(
+ [attco.ATTACHMENT_ID, ],
+ From=attco,
+ Where=(attco.MANAGED_ID == managedID),
+ ).on(txn))[0][0]
+
+ yield Insert({
+ attco.ATTACHMENT_ID : aid,
+ attco.MANAGED_ID : managedID,
+ attco.CALENDAR_OBJECT_RESOURCE_ID : referencedBy,
+ }).on(txn)
+
+
+ def managedID(self):
+ return self._managedID
+
+
+ @inlineCallbacks
+ def objectResource(self):
+ """
+ Return the calendar object resource associated with this attachment.
+ """
+
+ home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+ obj = (yield home.objectResourceWithID(self._objectResourceID))
+ returnValue(obj)
+
+
+ @property
+ def _path(self):
+ # Use directory hashing scheme based on MD5 of attachmentID
+ hasheduid = hashlib.md5(str(self._attachmentID)).hexdigest()
+ return self._attachmentPathRoot().child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
+
+
+ @inlineCallbacks
+ def location(self):
+ """
+ Return the URI location of the attachment.
+ """
+ if not hasattr(self, "_ownerName"):
+ home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
+ self._ownerName = home.name()
+ if not hasattr(self, "_objectDropboxID"):
+ if not hasattr(self, "_objectResource"):
+ self._objectResource = (yield self.objectResource())
+ self._objectDropboxID = self._objectResource._dropboxID
+
+ fname = self.lastSegmentOfUriPath(self._managedID, self._name)
+ location = self._txn._store.attachmentsURIPattern % {
+ "home": self._ownerName,
+ "dropbox_id": urllib.quote(self._objectDropboxID),
+ "name": urllib.quote(fname),
+ }
+ returnValue(location)
+
+
+ @classmethod
+ def lastSegmentOfUriPath(cls, managed_id, name):
+ splits = name.rsplit(".", 1)
+ fname = splits[0]
+ suffix = splits[1] if len(splits) == 2 else "unknown"
+ return "{0}-{1}.{2}".format(fname, managed_id[:8], suffix)
+
+
+ @inlineCallbacks
+ def changed(self, contentType, dispositionName, md5, size):
+ """
+ Always update name to current disposition name.
+ """
+
+ self._contentType = contentType
+ self._name = dispositionName
+ self._md5 = md5
+ self._size = size
+ att = self._attachmentSchema
+ self._created, self._modified = map(
+ sqltime,
+ (yield Update(
+ {
+ att.CONTENT_TYPE : generateContentType(self._contentType),
+ att.SIZE : self._size,
+ att.MD5 : self._md5,
+ att.MODIFIED : utcNowSQL,
+ att.PATH : self._name,
+ },
+ Where=(att.ATTACHMENT_ID == self._attachmentID),
+ Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
+ )
+
+
+ @inlineCallbacks
+ def newReference(self, resourceID):
+ """
+ Create a new reference of this attachment to the supplied calendar object resource id, and
+ return a ManagedAttachment for the new reference.
+
+ @param resourceID: the resource id to reference
+ @type resourceID: C{int}
+
+ @return: the new managed attachment
+ @rtype: L{ManagedAttachment}
+ """
+
+ attco = self._attachmentLinkSchema
+ yield Insert({
+ attco.ATTACHMENT_ID : self._attachmentID,
+ attco.MANAGED_ID : self._managedID,
+ attco.CALENDAR_OBJECT_RESOURCE_ID : resourceID,
+ }).on(self._txn)
+
+ mattach = (yield ManagedAttachment.load(self._txn, resourceID, self._managedID))
+ returnValue(mattach)
+
+
+ @inlineCallbacks
+ def removeFromResource(self, resourceID):
+
+ # Delete the reference
+ attco = self._attachmentLinkSchema
+ yield Delete(
+ From=attco,
+ Where=(attco.ATTACHMENT_ID == self._attachmentID).And(
+ attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
+ ).on(self._txn)
+
+ # Check whether any references still exist - if not, remove the actual attachment
+ rows = (yield Select(
+ [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
+ From=attco,
+ Where=(attco.ATTACHMENT_ID == self._attachmentID),
+ ).on(self._txn))
+ if len(rows) == 0:
+ yield self.remove()
+
+
+ @inlineCallbacks
+ def attachProperty(self):
+ """
+ Return an iCalendar ATTACH property for this attachment.
+ """
+ attach = Property("ATTACH", "", valuetype=Value.VALUETYPE_URI)
+ location = (yield self.updateProperty(attach))
+ returnValue((attach, location,))
+
+
+ @inlineCallbacks
+ def updateProperty(self, attach):
+ """
+ Update an iCalendar ATTACH property for this attachment.
+ """
+
+ location = (yield self.location())
+
+ attach.setParameter("MANAGED-ID", self.managedID())
+ attach.setParameter("FMTTYPE", "{0}/{1}".format(self.contentType().mediaType, self.contentType().mediaSubtype))
+ attach.setParameter("FILENAME", self.name())
+ attach.setParameter("SIZE", str(self.size()))
+ attach.setValue(location)
+
+ returnValue(location)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoresql_directorypyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcaldavdatastoresql_directorypy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_directory.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py         (rev 0)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql_directory.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,73 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import Select, Parameter
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.sql_directory import GroupsRecord
+
+"""
+Classes and methods that relate to directory objects in the SQL store, e.g.
+delegates, groups, etc.
+"""
+
+class GroupAttendeeRecord(SerializableRecord, fromTable(schema.GROUP_ATTENDEE)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.GROUP_ATTENDEE}.
+ """
+
+ @classmethod
+ @inlineCallbacks
+ def groupAttendeesForObjects(cls, txn, cobjs):
+ """
+ Get (group attendee, group) record pairs for each of the specified calendar objects.
+ """
+
+ # Do a join to get what we need
+ rows = yield Select(
+ list(GroupAttendeeRecord.table) + list(GroupsRecord.table),
+ From=GroupAttendeeRecord.table.join(GroupsRecord.table, GroupAttendeeRecord.groupID == GroupsRecord.groupID),
+ Where=(GroupAttendeeRecord.resourceID.In(Parameter("cobjs", len(cobjs))))
+ ).on(txn, cobjs=cobjs)
+
+ results = []
+ groupAttendeeNames = [GroupAttendeeRecord.__colmap__[column] for column in list(GroupAttendeeRecord.table)]
+ groupsNames = [GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table)]
+ split_point = len(groupAttendeeNames)
+ for row in rows:
+ groupAttendeeRow = row[:split_point]
+ groupAttendeeRecord = GroupAttendeeRecord()
+ groupAttendeeRecord._attributesFromRow(zip(groupAttendeeNames, groupAttendeeRow))
+ groupAttendeeRecord.transaction = txn
+ groupsRow = row[split_point:]
+ groupsRecord = GroupsRecord()
+ groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
+ groupsRecord.transaction = txn
+ results.append((groupAttendeeRecord, groupsRecord,))
+
+ returnValue(results)
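+
+ # A usage sketch (hypothetical transaction and calendar object resource ids):
+ #   pairs = yield GroupAttendeeRecord.groupAttendeesForObjects(txn, [101, 102])
+ #   for groupAttendee, group in pairs:
+ #       ...  # each pair links a GROUP_ATTENDEE row to its GROUPS row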
+
+
+
+class GroupShareeRecord(SerializableRecord, fromTable(schema.GROUP_SHAREE)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.GROUP_SHAREE}.
+ """
+ pass
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/sql_external.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/sql_external.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql_external.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,12 +18,15 @@
</span><span class="cx"> SQL backend for CalDAV storage when resources are external.
</span><span class="cx"> """
</span><span class="cx">
</span><del>-from twisted.internet.defer import succeed, inlineCallbacks, returnValue
</del><ins>+from twisted.internet.defer import inlineCallbacks, returnValue
</ins><span class="cx">
</span><span class="cx"> from twext.python.log import Logger
</span><span class="cx">
</span><span class="cx"> from txdav.caldav.datastore.sql import CalendarHome, Calendar, CalendarObject
</span><ins>+from txdav.caldav.datastore.sql_attachment import Attachment, AttachmentLink
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord, GroupShareeRecord
</ins><span class="cx"> from txdav.caldav.icalendarstore import ComponentUpdateState, ComponentRemoveState
</span><ins>+from txdav.common.datastore.sql_directory import GroupsRecord
</ins><span class="cx"> from txdav.common.datastore.sql_external import CommonHomeExternal, CommonHomeChildExternal, \
</span><span class="cx"> CommonObjectResourceExternal
</span><span class="cx">
</span><span class="lines">@@ -34,10 +37,10 @@
</span><span class="cx"> Wrapper for a CalendarHome that is external and only supports a limited set of operations.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID, resourceID):
</del><ins>+ def __init__(self, transaction, homeData):
</ins><span class="cx">
</span><del>- CalendarHome.__init__(self, transaction, ownerUID)
- CommonHomeExternal.__init__(self, transaction, ownerUID, resourceID)
</del><ins>+ CalendarHome.__init__(self, transaction, homeData)
+ CommonHomeExternal.__init__(self, transaction, homeData)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, mode):
</span><span class="lines">@@ -61,6 +64,36 @@
</span><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def getAllAttachments(self):
+ """
+ Return all the L{Attachment} objects associated with this calendar home.
+ Needed during migration.
+ """
+ raw_results = yield self._txn.store().conduit.send_home_get_all_attachments(self)
+ returnValue([Attachment.deserialize(self._txn, attachment) for attachment in raw_results])
+
+
+ @inlineCallbacks
+ def readAttachmentData(self, remote_id, attachment):
+ """
+ Read the data associated with an attachment associated with this calendar home.
+ Needed during migration only.
+ """
+ stream = attachment.store(attachment.contentType(), attachment.name(), migrating=True)
+ yield self._txn.store().conduit.send_get_attachment_data(self, remote_id, stream)
+
+
+ @inlineCallbacks
+ def getAttachmentLinks(self):
+ """
+ Read the attachment<->calendar object mapping data associated with this calendar home.
+ Needed during migration only.
+ """
+ raw_results = yield self._txn.store().conduit.send_home_get_attachment_links(self)
+ returnValue([AttachmentLink.deserialize(self._txn, attachment) for attachment in raw_results])
+
+
</ins><span class="cx"> def getAllDropboxIDs(self):
</span><span class="cx"> """
</span><span class="cx"> No children.
</span><span class="lines">@@ -82,13 +115,17 @@
</span><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><span class="cx">
</span><span class="cx">
</span><del>- def createdHome(self):
</del><ins>+ @inlineCallbacks
+ def getAllGroupAttendees(self):
</ins><span class="cx"> """
</span><del>- No children - make this a no-op.
</del><ins>+ Return a list of (L{GroupAttendeeRecord}, L{GroupsRecord}) pairs for each group attendee referenced in calendar data
+ owned by this home.
</ins><span class="cx"> """
</span><del>- return succeed(None)
</del><span class="cx">
</span><ins>+ raw_results = yield self._txn.store().conduit.send_home_get_all_group_attendees(self)
+ returnValue([(GroupAttendeeRecord.deserialize(item[0]), GroupsRecord.deserialize(item[1]),) for item in raw_results])
</ins><span class="cx">
</span><ins>+
</ins><span class="cx"> def splitCalendars(self):
</span><span class="cx"> """
</span><span class="cx"> No children.
</span><span class="lines">@@ -157,10 +194,16 @@
</span><span class="cx"> """
</span><span class="cx"> SQL-based implementation of L{ICalendar}.
</span><span class="cx"> """
</span><del>- pass
</del><span class="cx">
</span><ins>+ @inlineCallbacks
+ def groupSharees(self):
+ results = yield self._txn.store().conduit.send_homechild_group_sharees(self)
+ results["groups"] = [GroupsRecord.deserialize(items) for items in results["groups"]]
+ results["sharees"] = [GroupShareeRecord.deserialize(items) for items in results["sharees"]]
+ returnValue(results)
</ins><span class="cx">
</span><span class="cx">
</span><ins>+
</ins><span class="cx"> class CalendarObjectExternal(CommonObjectResourceExternal, CalendarObject):
</span><span class="cx"> """
</span><span class="cx"> SQL-based implementation of L{ICalendarObject}.
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretestcommonpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/common.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/common.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/common.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -375,7 +375,7 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def notificationUnderTest(self):
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- notifications = yield txn.notificationsWithUID("home1")
</del><ins>+ notifications = yield txn.notificationsWithUID("home1", create=True)
</ins><span class="cx"> yield notifications.writeNotificationObject(
</span><span class="cx"> "abc",
</span><span class="cx"> json.loads("{\"notification-type\":\"invite-notification\"}"),
</span><span class="lines">@@ -402,7 +402,7 @@
</span><span class="cx"> objects changed or deleted since
</span><span class="cx"> """
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- coll = yield txn.notificationsWithUID("home1")
</del><ins>+ coll = yield txn.notificationsWithUID("home1", create=True)
</ins><span class="cx"> yield coll.writeNotificationObject(
</span><span class="cx"> "1",
</span><span class="cx"> json.loads("{\"notification-type\":\"invite-notification\"}"),
</span><span class="lines">@@ -435,7 +435,7 @@
</span><span class="cx"> overwrite the notification object.
</span><span class="cx"> """
</span><span class="cx"> notifications = yield self.transactionUnderTest().notificationsWithUID(
</span><del>- "home1"
</del><ins>+ "home1", create=True
</ins><span class="cx"> )
</span><span class="cx"> yield notifications.writeNotificationObject(
</span><span class="cx"> "abc",
</span><span class="lines">@@ -462,7 +462,7 @@
</span><span class="cx"> """
</span><span class="cx"> # Prime the home collection first
</span><span class="cx"> yield self.transactionUnderTest().notificationsWithUID(
</span><del>- "home1"
</del><ins>+ "home1", create=True
</ins><span class="cx"> )
</span><span class="cx"> yield self.commit()
</span><span class="cx">
</span><span class="lines">@@ -512,7 +512,7 @@
</span><span class="cx"> overwrite the notification object.
</span><span class="cx"> """
</span><span class="cx"> notifications = yield self.transactionUnderTest().notificationsWithUID(
</span><del>- "home1"
</del><ins>+ "home1", create=True
</ins><span class="cx"> )
</span><span class="cx"> yield notifications.writeNotificationObject(
</span><span class="cx"> "abc",
</span><span class="lines">@@ -555,7 +555,7 @@
</span><span class="cx"> L{INotificationCollection} that the object was retrieved from.
</span><span class="cx"> """
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- collection = yield txn.notificationsWithUID("home1")
</del><ins>+ collection = yield txn.notificationsWithUID("home1", create=True)
</ins><span class="cx"> notification = yield self.notificationUnderTest()
</span><span class="cx"> self.assertIdentical(collection, notification.notificationCollection())
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_attachmentspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_attachments.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -29,8 +29,7 @@
</span><span class="cx"> from twistedcaldav.config import config
</span><span class="cx"> from twistedcaldav.ical import Property, Component
</span><span class="cx">
</span><del>-from txdav.caldav.datastore.sql import CalendarStoreFeatures, DropBoxAttachment, \
- ManagedAttachment
</del><ins>+from txdav.caldav.datastore.sql import CalendarStoreFeatures
</ins><span class="cx"> from txdav.caldav.datastore.test.common import CaptureProtocol
</span><span class="cx"> from txdav.caldav.icalendarstore import IAttachmentStorageTransport, IAttachment, \
</span><span class="cx"> QuotaExceeded, AttachmentSizeTooLarge
</span><span class="lines">@@ -40,6 +39,8 @@
</span><span class="cx">
</span><span class="cx"> import hashlib
</span><span class="cx"> import os
</span><ins>+from txdav.caldav.datastore.sql_attachment import DropBoxAttachment, \
+ ManagedAttachment
</ins><span class="cx">
</span><span class="cx"> """
</span><span class="cx"> Tests for txdav.caldav.datastore.sql attachment handling.
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_schedulepy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_schedule.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,70 +0,0 @@
</span><del>-##
-# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-"""
-Tests for L{txdav.caldav.datastore.scheduling}.
-
-The aforementioned module is intended to eventually support implicit
-scheduling; however, it does not currently. The interim purpose of this module
-and accompanying tests is to effectively test the interface specifications to
-make sure that the common tests don't require anything I{not} specified in the
-interface, so that dynamic proxies specified with a tool like
-C{proxyForInterface} can be used to implement features such as implicit
-scheduling or data caching as middleware in the data-store layer.
-"""
-
-from twisted.trial.unittest import TestCase, SkipTest
-from txdav.caldav.datastore.test.test_file import FileStorageTests
-from txdav.caldav.datastore.schedule import ImplicitStore
-
-simpleEvent = """BEGIN:VCALENDAR
-VERSION:2.0
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:12345-67890
-DTSTART:20080601T120000Z
-DTEND:20080601T130000Z
-ORGANIZER:mailto:user1@example.com
-ATTENDEE:mailto:user1@example.com
-ATTENDEE:mailto:user2@example.com
-END:VEVENT
-END:VCALENDAR
-"""
-
-class ImplicitStoreTests(FileStorageTests, TestCase):
- """
- Tests for L{ImplicitSchedulingStore}.
- """
-
- implicitStore = None
-
- def storeUnderTest(self):
- if self.implicitStore is None:
- sut = super(ImplicitStoreTests, self).storeUnderTest()
- self.implicitStore = ImplicitStore(sut)
- return self.implicitStore
-
-
- def skipit(self):
- raise SkipTest("No private attribute tests.")
-
- test_calendarObjectsWithDotFile = skipit
- test_countComponentTypes = skipit
- test_init = skipit
- test_calendarObjectsWithDirectory = skipit
- test_hasCalendarResourceUIDSomewhereElse = skipit
-
-del FileStorageTests
</del></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_sqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -767,13 +767,13 @@
</span><span class="cx"> txn2 = calendarStore.newTransaction()
</span><span class="cx">
</span><span class="cx"> notification_uid1_1 = yield txn1.notificationsWithUID(
</span><del>- "uid1",
</del><ins>+ "uid1", create=True
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _defer_notification_uid1_2():
</span><span class="cx"> notification_uid1_2 = yield txn2.notificationsWithUID(
</span><del>- "uid1",
</del><ins>+ "uid1", create=True
</ins><span class="cx"> )
</span><span class="cx"> yield txn2.commit()
</span><span class="cx"> returnValue(notification_uid1_2)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoretesttest_sql_sharingpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql_sharing.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -464,7 +464,7 @@
</span><span class="cx"> shared = yield self.calendarUnderTest(home="user02", name=sharedName)
</span><span class="cx"> self.assertTrue(shared is not None)
</span><span class="cx">
</span><del>- notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
</del><ins>+ notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02", create=True)
</ins><span class="cx"> notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx"> self.assertEqual(len(notifications), 0)
</span><span class="cx">
</span><span class="lines">@@ -587,7 +587,42 @@
</span><span class="cx"> yield self.commit()
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def test_sharingBindRecords(self):
</ins><span class="cx">
</span><ins>+ yield self.calendarUnderTest(home="user01", name="calendar")
+ yield self.commit()
+
+ shared_name = yield self._createShare()
+
+ shared = yield self.calendarUnderTest(home="user01", name="calendar")
+ results = yield shared.sharingBindRecords()
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results.keys(), ["user02"])
+ self.assertEqual(results["user02"].calendarResourceName, shared_name)
+
+
+ @inlineCallbacks
+ def test_sharedToBindRecords(self):
+
+ yield self.calendarUnderTest(home="user01", name="calendar")
+ yield self.commit()
+
+ shared_name = yield self._createShare()
+
+ home = yield self.homeUnderTest(name="user02")
+ results = yield home.sharedToBindRecords()
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results.keys(), ["user01"])
+ sharedRecord = results["user01"][0]
+ ownerRecord = results["user01"][1]
+ metadataRecord = results["user01"][2]
+ self.assertEqual(ownerRecord.calendarResourceName, "calendar")
+ self.assertEqual(sharedRecord.calendarResourceName, shared_name)
+ self.assertEqual(metadataRecord.supportedComponents, None)
+
+
+
</ins><span class="cx"> class GroupSharingTests(BaseSharingTests):
</span><span class="cx"> """
</span><span class="cx"> Test store-based group sharing.
</span><span class="lines">@@ -619,7 +654,7 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _check_notifications(self, uid, items):
</span><del>- notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid)
</del><ins>+ notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid, create=True)
</ins><span class="cx"> notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx"> self.assertEqual(set(notifications), set(items))
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavdatastoreutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/datastore/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/datastore/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/datastore/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -522,6 +522,11 @@
</span><span class="cx"> self._contentType = http_headers.MimeType.fromString(getType(self._attachment.name(), self.contentTypes))
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def resetDetails(self, contentType, dispositionName):
+ self._contentType = contentType
+ self._dispositionName = dispositionName
+
+
</ins><span class="cx"> def write(self, data):
</span><span class="cx"> """
</span><span class="cx"> Children must override this to actually write the data, but should
</span></span></pre></div>
<a id="CalendarServertrunktxdavcaldavicalendarstorepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/caldav/icalendarstore.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/caldav/icalendarstore.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/caldav/icalendarstore.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -906,7 +906,7 @@
</span><span class="cx"> is done (more than RAW).
</span><span class="cx">
</span><span class="cx"> RAW - store the supplied data as-is without any processing or validation. This is used
</span><del>- for unit testing purposes only.
</del><ins>+ for unit testing purposes only, or during migration.
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> NORMAL = NamedConstant()
</span></span></pre></div>
<a id="CalendarServertrunktxdavcarddavdatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -36,11 +36,10 @@
</span><span class="cx"> from txweb2.http_headers import MimeType
</span><span class="cx"> from txweb2.responsecode import FORBIDDEN
</span><span class="cx">
</span><del>-from twisted.internet.defer import inlineCallbacks, returnValue
</del><ins>+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
</ins><span class="cx"> from twisted.python import hashlib
</span><span class="cx">
</span><span class="cx"> from twistedcaldav.config import config
</span><del>-from twistedcaldav.memcacher import Memcacher
</del><span class="cx"> from twistedcaldav.vcard import Component as VCard, InvalidVCardDataError, Property, \
</span><span class="cx"> vCardProductID
</span><span class="cx">
</span><span class="lines">@@ -53,11 +52,12 @@
</span><span class="cx"> KindChangeNotAllowedError
</span><span class="cx"> from txdav.common.datastore.query.generator import SQLQueryGenerator
</span><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><del>- CommonObjectResource, EADDRESSBOOKTYPE, SharingMixIn, SharingInvitation
</del><ins>+ CommonObjectResource, EADDRESSBOOKTYPE, SharingMixIn
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _ABO_KIND_PERSON, \
</span><span class="cx"> _ABO_KIND_GROUP, _ABO_KIND_RESOURCE, _ABO_KIND_LOCATION, schema, \
</span><span class="cx"> _BIND_MODE_OWN, _BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, \
</span><span class="cx"> _BIND_STATUS_INVITED, _BIND_MODE_INDIRECT, _BIND_STATUS_DECLINED
</span><ins>+from txdav.common.datastore.sql_sharing import SharingInvitation
</ins><span class="cx"> from txdav.common.icommondatastore import InternalDataStoreError, \
</span><span class="cx"> InvalidUIDError, UIDExistsError, ObjectResourceTooBigError, \
</span><span class="cx"> InvalidObjectResourceError, InvalidComponentForStoreError, \
</span><span class="lines">@@ -77,20 +77,20 @@
</span><span class="cx">
</span><span class="cx"> # structured tables. (new, preferred)
</span><span class="cx"> _homeSchema = schema.ADDRESSBOOK_HOME
</span><del>- _bindSchema = schema.SHARED_ADDRESSBOOK_BIND
</del><span class="cx"> _homeMetaDataSchema = schema.ADDRESSBOOK_HOME_METADATA
</span><ins>+
+ _bindSchema = schema.SHARED_ADDRESSBOOK_BIND
</ins><span class="cx"> _revisionsSchema = schema.ADDRESSBOOK_OBJECT_REVISIONS
</span><span class="cx"> _objectSchema = schema.ADDRESSBOOK_OBJECT
</span><span class="cx">
</span><span class="cx"> _notifierPrefix = "CardDAV"
</span><span class="cx"> _dataVersionKey = "ADDRESSBOOK-DATAVERSION"
</span><del>- _cacher = Memcacher("SQL.adbkhome", pickle=True, key_normalization=False)
</del><span class="cx">
</span><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID, authzUID=None):
</del><ins>+ def __init__(self, transaction, homeData, authzUID=None):
</ins><span class="cx">
</span><del>- super(AddressBookHome, self).__init__(transaction, ownerUID, authzUID=authzUID)
</del><span class="cx"> self._addressbookPropertyStoreID = None
</span><ins>+ super(AddressBookHome, self).__init__(transaction, homeData, authzUID=authzUID)
</ins><span class="cx"> self._addressbook = None
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -116,6 +116,7 @@
</span><span class="cx"> return (
</span><span class="cx"> cls._homeSchema.RESOURCE_ID,
</span><span class="cx"> cls._homeSchema.OWNER_UID,
</span><ins>+ cls._homeSchema.STATUS,
</ins><span class="cx"> cls._homeSchema.ADDRESSBOOK_PROPERTY_STORE_ID,
</span><span class="cx"> )
</span><span class="cx">
</span><span class="lines">@@ -131,19 +132,20 @@
</span><span class="cx"> return (
</span><span class="cx"> "_resourceID",
</span><span class="cx"> "_ownerUID",
</span><ins>+ "_status",
</ins><span class="cx"> "_addressbookPropertyStoreID",
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def initFromStore(self, no_cache=False):
</del><ins>+ def initFromStore(self):
</ins><span class="cx"> """
</span><span class="cx"> Initialize this object from the store. We read in and cache all the
</span><span class="cx"> extra meta-data from the DB to avoid having to do DB queries for those
</span><span class="cx"> individually later.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- result = yield super(AddressBookHome, self).initFromStore(no_cache)
</del><ins>+ result = yield super(AddressBookHome, self).initFromStore()
</ins><span class="cx"> if result is not None:
</span><span class="cx"> # Created owned address book
</span><span class="cx"> addressbook = AddressBook(
</span><span class="lines">@@ -167,36 +169,23 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def remove(self):
</span><del>- ah = schema.ADDRESSBOOK_HOME
</del><span class="cx"> ahb = schema.SHARED_ADDRESSBOOK_BIND
</span><del>- aor = schema.ADDRESSBOOK_OBJECT_REVISIONS
- rp = schema.RESOURCE_PROPERTY
</del><span class="cx">
</span><span class="cx"> yield Delete(
</span><span class="cx"> From=ahb,
</span><span class="cx"> Where=ahb.ADDRESSBOOK_HOME_RESOURCE_ID == self._resourceID,
</span><span class="cx"> ).on(self._txn)
</span><span class="cx">
</span><del>- yield Delete(
- From=aor,
- Where=aor.ADDRESSBOOK_HOME_RESOURCE_ID == self._resourceID,
- ).on(self._txn)
</del><ins>+ yield super(AddressBookHome, self).remove()
</ins><span class="cx">
</span><del>- yield Delete(
- From=ah,
- Where=ah.RESOURCE_ID == self._resourceID,
- ).on(self._txn)
</del><span class="cx">
</span><del>- yield Delete(
- From=rp,
- Where=(rp.RESOURCE_ID == self._resourceID).Or(
- rp.RESOURCE_ID == self._addressbookPropertyStoreID
- )
- ).on(self._txn)
</del><ins>+ def removeAllChildren(self):
+ """
+ This is a NoOp for the single child address book home
+ """
+ return succeed(None)
</ins><span class="cx">
</span><del>- yield self._cacher.delete(str(self._ownerUID))
</del><span class="cx">
</span><del>-
</del><span class="cx"> @inlineCallbacks
</span><span class="cx"> def createdHome(self):
</span><span class="cx"> yield self.addressbook()._initSyncToken()
</span><span class="lines">@@ -473,7 +462,7 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def _getDBDataIndirect(cls, home, name, resourceID, externalID):
</del><ins>+ def _getDBDataIndirect(cls, home, name, resourceID, bindUID):
</ins><span class="cx">
</span><span class="cx"> # Get the bind row data
</span><span class="cx"> row = None
</span><span class="lines">@@ -503,7 +492,7 @@
</span><span class="cx"> overallBindStatus = _BIND_STATUS_INVITED
</span><span class="cx"> minBindRevision = None
</span><span class="cx"> for row in rows:
</span><del>- bindMode, homeID, resourceGroupID, externalID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceGroupID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> if groupID is None:
</span><span class="cx"> groupID = resourceGroupID
</span><span class="cx"> minBindRevision = min(minBindRevision, bindRevision) if minBindRevision is not None else bindRevision
</span><span class="lines">@@ -543,9 +532,9 @@
</span><span class="cx"> returnValue((bindData, additionalBindData, metadataData, ownerHome,))
</span><span class="cx">
</span><span class="cx">
</span><del>- def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, externalID=None):
</del><ins>+ def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, bindUID=None):
</ins><span class="cx"> ownerName = ownerHome.addressbook().name() if ownerHome else None
</span><del>- super(AddressBook, self).__init__(home, name, resourceID, mode, status, revision=revision, message=message, ownerHome=ownerHome, ownerName=ownerName, externalID=externalID)
</del><ins>+ super(AddressBook, self).__init__(home, name, resourceID, mode, status, revision=revision, message=message, ownerHome=ownerHome, ownerName=ownerName, bindUID=bindUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> def __repr__(self):
</span><span class="lines">@@ -875,7 +864,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- def create(cls, home, name, externalID=None):
</del><ins>+ def create(cls, home, name, bindUID=None):
</ins><span class="cx"> if name == home.addressbook().name():
</span><span class="cx"> # raise HomeChildNameAlreadyExistsError
</span><span class="cx"> pass
</span><span class="lines">@@ -1141,7 +1130,7 @@
</span><span class="cx"> home._txn, homeID=home._resourceID
</span><span class="cx"> )
</span><span class="cx"> for groupRow in groupRows:
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
</span><span class="cx"> ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID)
</span><span class="cx"> names |= set([ownerHome.uid()])
</span><span class="lines">@@ -1169,7 +1158,7 @@
</span><span class="cx"> )
</span><span class="cx"> # get ownerHomeIDs
</span><span class="cx"> for dataRow in dataRows:
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> ownerHome = yield home.ownerHomeWithChildID(resourceID)
</span><span class="cx"> ownerHomeToDataRowMap[ownerHome] = dataRow
</span><span class="cx">
</span><span class="lines">@@ -1178,12 +1167,16 @@
</span><span class="cx"> home._txn, homeID=home._resourceID
</span><span class="cx"> )
</span><span class="cx"> for groupBindRow in groupBindRows:
</span><del>- bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
</span><span class="cx"> ownerHome = yield home.ownerHomeWithChildID(ownerAddressBookID)
</span><span class="cx"> if ownerHome not in ownerHomeToDataRowMap:
</span><del>- groupBindRow[0] = _BIND_MODE_INDIRECT
- groupBindRow[3:7] = 4 * [None] # bindName, bindStatus, bindRevision, bindMessage
</del><ins>+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_MODE)] = _BIND_MODE_INDIRECT
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.RESOURCE_NAME)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_STATUS)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_REVISION)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.BIND_UID)] = None
+ groupBindRow[cls.bindColumns().index(cls._bindSchema.MESSAGE)] = None
</ins><span class="cx"> ownerHomeToDataRowMap[ownerHome] = groupBindRow
</span><span class="cx">
</span><span class="cx"> if ownerHomeToDataRowMap:
</span><span class="lines">@@ -1272,7 +1265,7 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def _indirectObjectWithNameOrID(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
</del><ins>+ def _indirectObjectWithNameOrID(cls, home, name=None, resourceID=None, bindUID=None, accepted=True):
</ins><span class="cx"> # replaces objectWithName()
</span><span class="cx"> """
</span><span class="cx"> Synthesize an indirect child for matching name or id based on whether shared groups exist.
</span><span class="lines">@@ -1285,7 +1278,7 @@
</span><span class="cx"> exists.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- dbData = yield cls._getDBDataIndirect(home, name, resourceID, externalID)
</del><ins>+ dbData = yield cls._getDBDataIndirect(home, name, resourceID, bindUID)
</ins><span class="cx"> if dbData is None:
</span><span class="cx"> returnValue(None)
</span><span class="cx"> bindData, additionalBindData, metadataData, ownerHome = dbData
</span><span class="lines">@@ -1423,7 +1416,7 @@
</span><span class="cx"> readWriteGroupIDs = set()
</span><span class="cx"> readOnlyGroupIDs = set()
</span><span class="cx"> for groupBindRow in groupBindRows:
</span><del>- bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> if bindMode == _BIND_MODE_WRITE:
</span><span class="cx"> readWriteGroupIDs.add(resourceID)
</span><span class="cx"> else:
</span><span class="lines">@@ -1484,7 +1477,7 @@
</span><span class="cx"> readWriteGroupIDs = []
</span><span class="cx"> readOnlyGroupIDs = []
</span><span class="cx"> for groupBindRow in groupBindRows:
</span><del>- bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> if bindMode == _BIND_MODE_WRITE:
</span><span class="cx"> readWriteGroupIDs.append(resourceID)
</span><span class="cx"> else:
</span><span class="lines">@@ -1602,10 +1595,10 @@
</span><span class="cx"> subt,
</span><span class="cx"> homeID=shareeHome._resourceID,
</span><span class="cx"> resourceID=self._resourceID,
</span><del>- externalID=None,
</del><span class="cx"> name=newName,
</span><span class="cx"> mode=mode,
</span><span class="cx"> bindStatus=status,
</span><ins>+ bindUID=None,
</ins><span class="cx"> message=summary
</span><span class="cx"> )
</span><span class="cx"> returnValue(newName)
</span><span class="lines">@@ -1920,7 +1913,7 @@
</span><span class="cx"> yield child._loadPropertyStore(propstore)
</span><span class="cx">
</span><span class="cx"> if groupBindData:
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindData[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindData[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx"> child._bindMode = bindMode
</span><span class="cx"> child._bindStatus = bindStatus
</span><span class="cx"> child._bindMessage = bindMessage
</span><span class="lines">@@ -2021,7 +2014,7 @@
</span><span class="cx"> self._bindName = None
</span><span class="cx"> self._bindRevision = None
</span><span class="cx"> super(AddressBookObject, self).__init__(addressbook, name, uid, resourceID, options)
</span><del>- self._externalID = None
</del><ins>+ self._bindUID = None
</ins><span class="cx"> self._options = {} if options is None else options
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -2230,7 +2223,7 @@
</span><span class="cx"> )
</span><span class="cx"> if groupBindRows:
</span><span class="cx"> groupBindRow = groupBindRows[0]
</span><del>- bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</del><ins>+ homeID, resourceID, bindName, bindMode, bindStatus, bindRevision, bindUID, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
</ins><span class="cx">
</span><span class="cx"> if accepted is not None and (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
</span><span class="cx"> returnValue(None)
</span></span></pre></div>
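<p>Part of the diff above replaces hard-coded slice indices into the bind row with lookups keyed on the schema column, so resetting an indirectly shared group bind keeps working if the bind column order changes again. A minimal illustrative sketch of that idea in Python, using a placeholder column list rather than the real cls.bindColumns() schema objects:</p>
<pre>
# Illustrative only: placeholder column order standing in for cls.bindColumns().
BIND_COLUMNS = [
    "HOME_RESOURCE_ID", "RESOURCE_ID", "RESOURCE_NAME",
    "BIND_MODE", "BIND_STATUS", "BIND_REVISION", "BIND_UID", "MESSAGE",
]

def resetIndirectBind(row, indirectMode):
    """Blank the share-specific values of a bind row, keyed by column name rather than position."""
    row = list(row)
    row[BIND_COLUMNS.index("BIND_MODE")] = indirectMode
    for column in ("RESOURCE_NAME", "BIND_STATUS", "BIND_REVISION", "BIND_UID", "MESSAGE"):
        row[BIND_COLUMNS.index(column)] = None
    return row
</pre>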
<a id="CalendarServertrunktxdavcarddavdatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/sql_external.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/sql_external.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql_external.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,8 +18,6 @@
</span><span class="cx"> SQL backend for CardDAV storage when resources are external.
</span><span class="cx"> """
</span><span class="cx">
</span><del>-from twisted.internet.defer import succeed
-
</del><span class="cx"> from twext.python.log import Logger
</span><span class="cx">
</span><span class="cx"> from txdav.carddav.datastore.sql import AddressBookHome, AddressBook, \
</span><span class="lines">@@ -31,10 +29,10 @@
</span><span class="cx">
</span><span class="cx"> class AddressBookHomeExternal(CommonHomeExternal, AddressBookHome):
</span><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID, resourceID):
</del><ins>+ def __init__(self, transaction, homeData):
</ins><span class="cx">
</span><del>- AddressBookHome.__init__(self, transaction, ownerUID)
- CommonHomeExternal.__init__(self, transaction, ownerUID, resourceID)
</del><ins>+ AddressBookHome.__init__(self, transaction, homeData)
+ CommonHomeExternal.__init__(self, transaction, homeData)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> def hasAddressBookResourceUIDSomewhereElse(self, uid, ok_object, mode):
</span><span class="lines">@@ -51,13 +49,6 @@
</span><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><span class="cx">
</span><span class="cx">
</span><del>- def createdHome(self):
- """
- No children - make this a no-op.
- """
- return succeed(None)
-
-
</del><span class="cx"> def addressbook(self):
</span><span class="cx"> """
</span><span class="cx"> No children.
</span></span></pre></div>
<a id="CalendarServertrunktxdavcarddavdatastoretesttest_sqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -70,7 +70,7 @@
</span><span class="cx"> populateTxn = self.storeUnderTest().newTransaction()
</span><span class="cx"> for homeUID in self.requirements:
</span><span class="cx"> addressbooks = self.requirements[homeUID]
</span><del>- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx"> if addressbooks is not None:
</span><span class="cx"> addressbook = home.addressbook()
</span><span class="cx">
</span><span class="lines">@@ -364,13 +364,13 @@
</span><span class="cx"> txn2 = addressbookStore.newTransaction()
</span><span class="cx">
</span><span class="cx"> notification_uid1_1 = yield txn1.notificationsWithUID(
</span><del>- "uid1",
</del><ins>+ "uid1", create=True,
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _defer_notification_uid1_2():
</span><span class="cx"> notification_uid1_2 = yield txn2.notificationsWithUID(
</span><del>- "uid1",
</del><ins>+ "uid1", create=True,
</ins><span class="cx"> )
</span><span class="cx"> yield txn2.commit()
</span><span class="cx"> returnValue(notification_uid1_2)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcarddavdatastoretesttest_sql_sharingpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_sql_sharing.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -45,7 +45,7 @@
</span><span class="cx"> for homeUID in self.requirements:
</span><span class="cx"> addressbooks = self.requirements[homeUID]
</span><span class="cx"> if addressbooks is not None:
</span><del>- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx"> addressbook = home.addressbook()
</span><span class="cx">
</span><span class="cx"> addressbookObjNames = addressbooks[addressbook.name()]
</span><span class="lines">@@ -198,7 +198,7 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _check_notifications(self, home, items):
</span><del>- notifyHome = yield self.transactionUnderTest().notificationsWithUID(home)
</del><ins>+ notifyHome = yield self.transactionUnderTest().notificationsWithUID(home, create=True)
</ins><span class="cx"> notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx"> self.assertEqual(set(notifications), set(items))
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorefilepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/file.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/file.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/file.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -44,7 +44,8 @@
</span><span class="cx"> from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
</span><span class="cx"> HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
</span><span class="cx"> InternalDataStoreError, ObjectResourceNameNotAllowedError, \
</span><del>- ObjectResourceNameAlreadyExistsError, NoSuchObjectResourceError
</del><ins>+ ObjectResourceNameAlreadyExistsError, NoSuchObjectResourceError, \
+ ECALENDARTYPE, EADDRESSBOOKTYPE
</ins><span class="cx"> from txdav.common.idirectoryservice import IStoreDirectoryService
</span><span class="cx"> from txdav.common.inotifications import INotificationCollection, \
</span><span class="cx"> INotificationObject
</span><span class="lines">@@ -64,16 +65,6 @@
</span><span class="cx"> from twistedcaldav.sql import AbstractSQLDatabase, db_prefix
</span><span class="cx"> import os
</span><span class="cx">
</span><del>-ECALENDARTYPE = 0
-EADDRESSBOOKTYPE = 1
-
-# Labels used to identify the class of resource being modified, so that
-# notification systems can target the correct application
-NotifierPrefixes = {
- ECALENDARTYPE : "CalDAV",
- EADDRESSBOOKTYPE : "CardDAV",
-}
-
</del><span class="cx"> TOPPATHS = (
</span><span class="cx"> "calendars",
</span><span class="cx"> "addressbooks"
</span><span class="lines">@@ -343,15 +334,15 @@
</span><span class="cx"> CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome
</span><span class="cx">
</span><span class="cx">
</span><del>- def calendarHomeWithUID(self, uid, create=False):
- return self.homeWithUID(ECALENDARTYPE, uid, create=create)
</del><ins>+ def calendarHomeWithUID(self, uid, status=None, create=False):
+ return self.homeWithUID(ECALENDARTYPE, uid, status=status, create=create)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def addressbookHomeWithUID(self, uid, create=False):
- return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
</del><ins>+ def addressbookHomeWithUID(self, uid, status=None, create=False):
+ return self.homeWithUID(EADDRESSBOOKTYPE, uid, status=status, create=create)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def _determineMemo(self, storeType, uid, create=False):
</del><ins>+ def _determineMemo(self, storeType, uid, status=None, create=False):
</ins><span class="cx"> """
</span><span class="cx"> Determine the memo dictionary to use for homeWithUID.
</span><span class="cx"> """
</span><span class="lines">@@ -374,7 +365,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @memoizedKey("uid", _determineMemo, deferredResult=False)
</span><del>- def homeWithUID(self, storeType, uid, create=False):
</del><ins>+ def homeWithUID(self, storeType, uid, status=None, create=False):
</ins><span class="cx"> if uid.startswith("."):
</span><span class="cx"> return None
</span><span class="cx">
</span><span class="lines">@@ -385,7 +376,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @memoizedKey("uid", "_notificationHomes", deferredResult=False)
</span><del>- def notificationsWithUID(self, uid, home=None):
</del><ins>+ def notificationsWithUID(self, uid, home=None, create=False):
</ins><span class="cx">
</span><span class="cx"> if home is None:
</span><span class="cx"> home = self.homeWithUID(self._notificationHomeType, uid, create=True)
</span></span></pre></div>
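<p>Because a status argument now precedes the create flag in these transaction signatures, call sites that used to pass True positionally have to use the create keyword, as the updated tests above do. A hedged sketch of such a call site; the helper name and UID are placeholders, and an existing store transaction is assumed:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def homesForUser(txn, uid):
    # Fetch (creating if needed) the calendar home and notification
    # collection for a user within an existing transaction "txn".
    home = yield txn.calendarHomeWithUID(uid, create=True)
    notifications = yield txn.notificationsWithUID(uid, create=True)
    returnValue((home, notifications))
</pre>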
<a id="CalendarServertrunktxdavcommondatastorepoddingattachmentspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/attachments.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/attachments.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/attachments.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -15,6 +15,9 @@
</span><span class="cx"> ##
</span><span class="cx">
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><ins>+from txdav.caldav.icalendarstore import InvalidAttachmentOperation
+from txdav.common.datastore.podding.util import UtilityConduitMixin
+from txweb2.http_headers import generateContentType
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> class AttachmentsConduitMixin(object):
</span><span class="lines">@@ -150,3 +153,48 @@
</span><span class="cx"> request["rids"],
</span><span class="cx"> request["managedID"],
</span><span class="cx"> )
</span><ins>+
+
+ @inlineCallbacks
+ def send_get_attachment_data(self, home, attachment_id, stream):
+ """
+ Managed attachment readAttachmentData call. We are using streams on the sender and the receiver
+ side to avoid reading the whole attachment into memory.
+
+ @param home: the home whose attachment is being read
+ @type home: L{CalendarHome}
+ @param attachment_id: attachment-id to get
+ @type attachment_id: C{str}
+ @param stream: attachment data stream to write to
+ @type stream: L{IStream}
+ """
+
+ actionName = "get-attachment-data"
+ txn, request, server = yield self._getRequestForStoreObject(actionName, home, False)
+ request["attachmentID"] = attachment_id
+
+ response = yield self.sendRequestToServer(txn, server, request, writeStream=stream)
+ returnValue(response)
+
+
+ @inlineCallbacks
+ def recv_get_attachment_data(self, txn, request, stream):
+ """
+ Process a getAttachmentData cross-pod request. Request arguments as per L{send_get_attachment_data}.
+
+ @param request: request arguments
+ @type request: C{dict}
+ """
+
+ home, _ignore = yield self._getStoreObjectForRequest(txn, request)
+ attachment = yield home.getAttachmentByID(request["attachmentID"])
+ if attachment is None:
+ raise InvalidAttachmentOperation("Attachment is missing: {}".format(request["attachmentID"]))
+
+ attachment.retrieve(stream)
+ returnValue((generateContentType(attachment.contentType()), attachment.name(),))
+
+
+# Calls on L{CommonHome} objects
+UtilityConduitMixin._make_simple_action(AttachmentsConduitMixin, "home_get_all_attachments", "getAllAttachments", classMethod=False, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(AttachmentsConduitMixin, "home_get_attachment_links", "getAttachmentLinks", classMethod=False, transform_recv_result=UtilityConduitMixin._to_serialize_list)
</ins></span></pre></div>
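<p>The new get-attachment-data action streams the attachment body between pods instead of buffering it, so neither side has to hold the whole attachment in memory. A hedged usage sketch, assuming a conduit, home and attachment id obtained from the surrounding store code; the helper name and in-memory buffer are illustrative only:</p>
<pre>
from io import BytesIO

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def fetchRemoteAttachment(conduit, home, attachment_id):
    # Any writable stream would do; a temporary file is a better fit than an
    # in-memory buffer for large attachments.
    stream = BytesIO()
    response = yield conduit.send_get_attachment_data(home, attachment_id, stream)
    returnValue((response, stream.getvalue()))
</pre>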
<a id="CalendarServertrunktxdavcommondatastorepoddingconduitpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/conduit.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/conduit.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/conduit.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -19,14 +19,11 @@
</span><span class="cx"> from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
</span><span class="cx"> from txdav.common.datastore.podding.attachments import AttachmentsConduitMixin
</span><span class="cx"> from txdav.common.datastore.podding.base import FailedCrossPodRequestError
</span><del>-from txdav.common.datastore.podding.directory import (
- DirectoryPoddingConduitMixin
-)
</del><ins>+from txdav.common.datastore.podding.directory import DirectoryPoddingConduitMixin
+from txdav.common.datastore.podding.request import ConduitRequest
+from txdav.common.datastore.podding.sharing_invites import SharingInvitesConduitMixin
</ins><span class="cx"> from txdav.common.datastore.podding.store_api import StoreAPIConduitMixin
</span><del>-from txdav.common.datastore.podding.request import ConduitRequest
-from txdav.common.datastore.podding.sharing_invites import (
- SharingInvitesConduitMixin
-)
</del><ins>+from txdav.common.datastore.podding.util import UtilityConduitMixin
</ins><span class="cx">
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><span class="cx"> from twisted.python.reflect import namedClass
</span><span class="lines">@@ -36,6 +33,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> class PoddingConduit(
</span><ins>+ UtilityConduitMixin,
</ins><span class="cx"> StoreAPIConduitMixin,
</span><span class="cx"> AttachmentsConduitMixin,
</span><span class="cx"> SharingInvitesConduitMixin,
</span><span class="lines">@@ -44,33 +42,29 @@
</span><span class="cx"> """
</span><span class="cx"> This class is the API/RPC bridge between cross-pod requests and the store.
</span><span class="cx">
</span><del>- Each cross-pod request/response is described by a Python C{dict} that is
- serialized to JSON for the HTTP request/response.
</del><ins>+ Each cross-pod request/response is described by a Python C{dict} that is serialized
+ to JSON for the HTTP request/response.
</ins><span class="cx">
</span><del>- Each request C{dict} has an "action" key that indicates what call is being
- made, and the other keys are arguments to that call.
</del><ins>+ Each request C{dict} has an "action" key that indicates what call is being made, and
+ the other keys are arguments to that call.
</ins><span class="cx">
</span><del>- Each response C{dict} has a "result" key that indicates the call result,
- and other optional keys for any values returned by the call.
</del><ins>+ Each response C{dict} has a "result" key that indicates the call result, and other
+ optional keys for any values returned by the call.
</ins><span class="cx">
</span><del>- The conduit provides two methods for each action: one for the sending side
- and one for the receiving side, called "send_{action}" and "recv_{action}",
- respectively, where {action} is the action value.
</del><ins>+ The conduit provides two methods for each action: one for the sending side and one for
+ the receiving side, called "send_{action}" and "recv_{action}", respectively, where
+ {action} is the action value.
</ins><span class="cx">
</span><del>- The "send_{action}" calls each have a set of arguments specific to the call
- itself.
- The code takes care of packing that into a C{dict} and sending to the
- appropriate pod.
</del><ins>+ The "send_{action}" calls each have a set of arguments specific to the call itself. The
+ code takes care of packing that into a C{dict} and sending to the appropriate pod.
</ins><span class="cx">
</span><del>- The "recv_{action}" calls take a single C{dict} argument that is the
- deserialized JSON data from the incoming request. The return value is a
- C{dict} with the result.
</del><ins>+ The "recv_{action}" calls take a single C{dict} argument that is the deserialized JSON
+ data from the incoming request. The return value is a C{dict} with the result.
</ins><span class="cx">
</span><del>- Some simple forms of send_/recv_ methods can be auto-generated to simplify
- coding.
</del><ins>+ Some simple forms of send_/recv_ methods can be auto-generated to simplify coding.
</ins><span class="cx">
</span><del>- Actual implementations of this will be done via mix-ins for the different
- sub-systems using the conduit.
</del><ins>+ Actual implementations of this will be done via mix-ins for the different sub-systems using
+ the conduit.
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> conduitRequestClass = ConduitRequest
</span><span class="lines">@@ -80,6 +74,7 @@
</span><span class="cx"> @param store: the L{CommonDataStore} in use.
</span><span class="cx"> """
</span><span class="cx"> self.store = store
</span><ins>+ self.streamingActions = ("get-attachment-data",)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -88,12 +83,9 @@
</span><span class="cx"> Verify that the specified uids are valid for the request and return the
</span><span class="cx"> matching directory records.
</span><span class="cx">
</span><del>- @param source_uid: UID for the user on whose behalf the request is
- being made
</del><ins>+ @param source_uid: UID for the user on whose behalf the request is being made
</ins><span class="cx"> @type source_uid: C{str}
</span><del>-
- @param destination_uid: UID for the user to whom the request is being
- sent
</del><ins>+ @param destination_uid: UID for the user to whom the request is being sent
</ins><span class="cx"> @type destination_uid: C{str}
</span><span class="cx">
</span><span class="cx"> @return: L{Deferred} resulting in C{tuple} of L{IStoreDirectoryRecord}
</span><span class="lines">@@ -101,59 +93,57 @@
</span><span class="cx">
</span><span class="cx"> source = yield self.store.directoryService().recordWithUID(source_uid)
</span><span class="cx"> if source is None:
</span><del>- raise DirectoryRecordNotFoundError(
- "Cross-pod source: {}".format(source_uid)
- )
</del><ins>+ raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_uid))
</ins><span class="cx"> if not source.thisServer():
</span><del>- raise FailedCrossPodRequestError(
- "Cross-pod source not on this server: {}".format(source_uid)
- )
</del><ins>+ raise FailedCrossPodRequestError("Cross-pod source not on this server: {}".format(source_uid))
</ins><span class="cx">
</span><del>- destination = yield self.store.directoryService().recordWithUID(
- destination_uid
- )
</del><ins>+ destination = yield self.store.directoryService().recordWithUID(destination_uid)
</ins><span class="cx"> if destination is None:
</span><del>- raise DirectoryRecordNotFoundError(
- "Cross-pod destination: {}".format(destination_uid)
- )
</del><ins>+ raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_uid))
</ins><span class="cx"> if destination.thisServer():
</span><del>- raise FailedCrossPodRequestError(
- "Cross-pod destination on this server: {}".format(
- destination_uid
- )
- )
</del><ins>+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_uid))
</ins><span class="cx">
</span><span class="cx"> returnValue((source, destination,))
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> def sendRequest(self, txn, recipient, data, stream=None, streamType=None):
</span><del>- return self.sendRequestToServer(
- txn, recipient.server(), data, stream, streamType
- )
</del><ins>+ return self.sendRequestToServer(txn, recipient.server(), data, stream, streamType)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def sendRequestToServer(
- self, txn, server, data, stream=None, streamType=None
- ):
</del><ins>+ def sendRequestToServer(self, txn, server, data, stream=None, streamType=None, writeStream=None):
</ins><span class="cx">
</span><del>- request = self.conduitRequestClass(server, data, stream, streamType)
</del><ins>+ request = self.conduitRequestClass(server, data, stream, streamType, writeStream)
</ins><span class="cx"> try:
</span><span class="cx"> response = (yield request.doRequest(txn))
</span><span class="cx"> except Exception as e:
</span><del>- raise FailedCrossPodRequestError(
- "Failed cross-pod request: {}".format(e)
- )
</del><ins>+ raise FailedCrossPodRequestError("Failed cross-pod request: {}".format(e))
</ins><span class="cx"> if response["result"] == "exception":
</span><span class="cx"> raise namedClass(response["class"])(response["details"])
</span><span class="cx"> elif response["result"] != "ok":
</span><del>- raise FailedCrossPodRequestError(
- "Cross-pod request failed: {}".format(response)
- )
</del><ins>+ raise FailedCrossPodRequestError("Cross-pod request failed: {}".format(response))
</ins><span class="cx"> else:
</span><span class="cx"> returnValue(response.get("value"))
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def isStreamAction(self, data):
+ """
+ Check to see if this is a request that will return a data stream rather than a JSON response.
+ e.g., this is used to retrieve attachment data on another pod.
+
+ @param data: the JSON data to process
+ @type data: C{dict}
+ """
+ # Must have a dict with an "action" key
+ try:
+ action = data["action"]
+ except (KeyError, TypeError) as e:
+ log.error("JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}", ex=e, json=data)
+ return False
+
+ return action in self.streamingActions
+
+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def processRequest(self, data):
</span><span class="cx"> """
</span><span class="lines">@@ -166,15 +156,8 @@
</span><span class="cx"> try:
</span><span class="cx"> action = data["action"]
</span><span class="cx"> except (KeyError, TypeError) as e:
</span><del>- log.error(
- "JSON data must have an object as its root with an 'action' "
- "attribute: {ex}\n{json}",
- ex=e, json=data
- )
- raise FailedCrossPodRequestError(
- "JSON data must have an object as its root with an 'action' "
- "attribute: {}\n{}".format(e, data,)
- )
</del><ins>+ log.error("JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}", ex=e, json=data)
+ raise FailedCrossPodRequestError("JSON data must have an object as its root with an 'action' attribute: {}\n{}".format(e, data,))
</ins><span class="cx">
</span><span class="cx"> if action == "ping":
</span><span class="cx"> result = {"result": "ok"}
</span><span class="lines">@@ -183,9 +166,7 @@
</span><span class="cx"> method = "recv_{}".format(action.replace("-", "_"))
</span><span class="cx"> if not hasattr(self, method):
</span><span class="cx"> log.error("Unsupported action: {action}", action=action)
</span><del>- raise FailedCrossPodRequestError(
- "Unsupported action: {}".format(action)
- )
</del><ins>+ raise FailedCrossPodRequestError("Unsupported action: {}".format(action))
</ins><span class="cx">
</span><span class="cx"> # Need a transaction to work with
</span><span class="cx"> txn = self.store.newTransaction(repr("Conduit request"))
</span><span class="lines">@@ -203,9 +184,7 @@
</span><span class="cx"> log.error("Failed action: {action}, {ex}", action=action, ex=e)
</span><span class="cx"> result = {
</span><span class="cx"> "result": "exception",
</span><del>- "class": ".".join((
- e.__class__.__module__, e.__class__.__name__,
- )),
</del><ins>+ "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
</ins><span class="cx"> "details": str(e),
</span><span class="cx"> }
</span><span class="cx">
</span><span class="lines">@@ -213,3 +192,48 @@
</span><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> returnValue(result)
</span><ins>+
+
+ @inlineCallbacks
+ def processRequestStream(self, data, stream):
+ """
+ Process a streaming request: the handler writes its result data to the supplied stream rather than returning it as a JSON value.
+
+ @param data: the JSON data to process
+ @type data: C{dict}
+
+ @return: a L{tuple} of content-type and name, if successful, else a L{dict} for a JSON result
+ @rtype: L{tuple} of (L{str}, L{str}), or L{dict}
+ """
+ # Must have a dict with an "action" key
+ try:
+ action = data["action"]
+ except (KeyError, TypeError) as e:
+ log.error("JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}", ex=e, json=data)
+ raise FailedCrossPodRequestError("JSON data must have an object as its root with an 'action' attribute: {}\n{}".format(e, data,))
+
+ method = "recv_{}".format(action.replace("-", "_"))
+ if not hasattr(self, method):
+ log.error("Unsupported action: {action}", action=action)
+ raise FailedCrossPodRequestError("Unsupported action: {}".format(action))
+
+ # Need a transaction to work with
+ txn = self.store.newTransaction(repr("Conduit request"))
+
+ # Do the actual request processing
+ try:
+ result = (yield getattr(self, method)(txn, data, stream))
+ except Exception as e:
+ # Send the exception over to the other side
+ yield txn.abort()
+ log.error("Failed action: {action}, {ex}", action=action, ex=e)
+ result = {
+ "result": "exception",
+ "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
+ "details": str(e),
+ }
+
+ else:
+ yield txn.commit()
+
+ returnValue(result)
</ins></span></pre></div>
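<p>A minimal sketch of the request shape the conduit docstring describes, and of the check isStreamAction now performs on it; the argument key shown is only illustrative of the pattern:</p>
<pre>
# The "action" key selects the recv_{action} handler on the receiving pod;
# every other key is an argument to that handler.
request = {
    "action": "get-attachment-data",
    "attachmentID": "example-attachment-id",  # placeholder argument
}

# Streaming actions return raw data rather than a JSON body, so the conduit
# only needs the action name to decide how to handle the response.
streamingActions = ("get-attachment-data",)
isStreaming = request.get("action") in streamingActions
</pre>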
<a id="CalendarServertrunktxdavcommondatastorepoddingdirectorypy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/directory.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/directory.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/directory.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -191,3 +191,126 @@
</span><span class="cx"> delegators = yield Delegates._delegatedToUIDs(txn, delegate, request["read-write"], onlyThisServer=True)
</span><span class="cx">
</span><span class="cx"> returnValue(list(delegators))
</span><ins>+
+
+ @inlineCallbacks
+ def send_dump_individual_delegates(self, txn, delegator):
+ """
+ Get L{DelegateRecords} from another pod.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param delegator: delegate to lookup
+ @type delegator: L{DirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ """
+ if delegator.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
+
+ request = {
+ "action": "dump-individual-delegates",
+ "uid": delegator.uid,
+ }
+ response = yield self.sendRequestToServer(txn, delegator.server(), request)
+ returnValue(response)
+
+
+ @inlineCallbacks
+ def recv_dump_individual_delegates(self, txn, request):
+ """
+ Process a delegators cross-pod request. Request arguments as per L{send_dump_individual_delegates}.
+
+ @param request: request arguments
+ @type request: C{dict}
+ """
+
+ delegator = yield txn.directoryService().recordWithUID(request["uid"])
+ if delegator is None or not delegator.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod delegate missing or not on this server: {}".format(request["uid"]))
+
+ delegates = yield txn.dumpIndividualDelegatesLocal(delegator.uid)
+
+ returnValue(self._to_serialize_list(delegates))
+
+
+ @inlineCallbacks
+ def send_dump_group_delegates(self, txn, delegator):
+ """
+ Get L{DelegateGroupsRecord},L{GroupsRecord} from another pod.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param delegator: delegate to lookup
+ @type delegator: L{DirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ """
+ if delegator.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
+
+ request = {
+ "action": "dump-group-delegates",
+ "uid": delegator.uid,
+ }
+ response = yield self.sendRequestToServer(txn, delegator.server(), request)
+ returnValue(response)
+
+
+ @inlineCallbacks
+ def recv_dump_group_delegates(self, txn, request):
+ """
+ Process a delegators cross-pod request. Request arguments as per L{send_dump_group_delegates}.
+
+ @param request: request arguments
+ @type request: C{dict}
+ """
+
+ delegator = yield txn.directoryService().recordWithUID(request["uid"])
+ if delegator is None or not delegator.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod delegate missing or not on this server: {}".format(request["uid"]))
+
+ results = yield txn.dumpGroupDelegatesLocal(delegator.uid)
+
+ returnValue([[delegator.serialize(), group.serialize()] for delegator, group in results])
+
+
+ @inlineCallbacks
+ def send_dump_external_delegates(self, txn, delegator):
+ """
+ Get L{ExternalDelegateGroupsRecord} from another pod.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param delegator: delegate to lookup
+ @type delegator: L{DirectoryRecord}
+ @param readWrite: if True, read and write access delegates are returned;
+ read-only access otherwise
+ """
+ if delegator.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
+
+ request = {
+ "action": "dump-external-delegates",
+ "uid": delegator.uid,
+ }
+ response = yield self.sendRequestToServer(txn, delegator.server(), request)
+ returnValue(response)
+
+
+ @inlineCallbacks
+ def recv_dump_external_delegates(self, txn, request):
+ """
+ Process a delegators cross-pod request. Request arguments as per L{send_dump_external_delegates}.
+
+ @param request: request arguments
+ @type request: C{dict}
+ """
+
+ delegator = yield txn.directoryService().recordWithUID(request["uid"])
+ if delegator is None or not delegator.thisServer():
+ raise FailedCrossPodRequestError("Cross-pod delegate missing or not on this server: {}".format(request["uid"]))
+
+ delegates = yield txn.dumpExternalDelegatesLocal(delegator.uid)
+
+ returnValue(self._to_serialize_list(delegates))
</ins></span></pre></div>
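<p>Each of the new delegate dump calls follows the same pattern: the sending pod packs the delegator's UID into an action dict, and the receiving pod checks that the record is hosted locally before returning the serialized delegate records. A sketch of the wire request for the individual-delegates case, with a placeholder UID:</p>
<pre>
request = {
    "action": "dump-individual-delegates",
    "uid": "00000000-0000-0000-0000-000000000000",  # delegator UID (placeholder)
}
</pre>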
<a id="CalendarServertrunktxdavcommondatastorepoddingmigration__init__py"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/__init__.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,15 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigration__init__pyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigration__init__py"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/__init__.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,15 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,1353 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from functools import wraps
-
-from twext.python.log import Logger
-from twisted.internet.defer import returnValue, inlineCallbacks
-from twisted.python.failure import Failure
-from twistedcaldav.accounting import emitAccounting
-from txdav.caldav.icalendarstore import ComponentUpdateState
-from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
- CalendarObjectMigrationRecord, AttachmentMigrationRecord
-from txdav.caldav.datastore.sql import ManagedAttachment, CalendarBindRecord
-from txdav.common.datastore.sql_external import NotificationCollectionExternal
-from txdav.common.datastore.sql_notification import NotificationCollection
-from txdav.common.datastore.sql_tables import _HOME_STATUS_MIGRATING, _HOME_STATUS_DISABLED, \
- _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL
-from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
-
-from uuid import uuid4
-import datetime
-
-log = Logger()
-
-ACCOUNTING_TYPE = "migration"
-ACCOUNTING_LOG = "migration.log"
-
-def inTransactionWrapper(operation):
- """
- This wrapper converts an instance method that takes a transaction as its
- first parameter into one where the transaction parameter is an optional
- keyword argument. If the keyword argument is present and not None, then
- the instance method is called with that keyword as the first positional
- argument (i.e., almost a NoOp). If the keyword argument is not present,
- then a new transaction is created and the instance method called with
- it as the first positional argument, plus the call is wrapped with
- try/except/else to ensure proper commit and abort of the internally
- created transaction is done.
-
- So this wrapper allows for a method that requires a transaction to be run
- with either an existing transaction or one created just for the purpose
- of running it.
-
- @param operation: a callable that takes an L{IAsyncTransaction} as its first
- argument, and returns a value.
- """
-
- @wraps(operation)
- @inlineCallbacks
- def _inTxn(self, *args, **kwargs):
- label = self.label(operation.__name__)
- if "txn" in kwargs:
- txn = kwargs["txn"]
- del kwargs["txn"]
- result = yield operation(self, txn, *args, **kwargs)
- returnValue(result)
- else:
- txn = self.store.newTransaction(label=label)
- try:
- result = yield operation(self, txn, *args, **kwargs)
- except Exception as ex:
- f = Failure()
- yield txn.abort()
- log.error("{label} failed: {e}".format(label=label, e=str(ex)))
- returnValue(f)
- else:
- yield txn.commit()
- returnValue(result)
-
- return _inTxn
-
-
-
-# Cross-pod synchronization of an entire calendar home
-class CrossPodHomeSync(object):
-
- BATCH_SIZE = 50
-
- def __init__(self, store, diruid, final=False, uselog=None):
- """
- @param store: the data store
- @type store: L{CommonDataStore}
- @param diruid: directory uid of the user whose home is to be sync'd
- @type diruid: L{str}
- @param final: indicates whether this is in the final sync stage with the remote home
- already disabled
- @type final: L{bool}
- @param uselog: additional logging written to this object
- @type: L{File}
- """
-
- self.store = store
- self.diruid = diruid
- self.disabledRemote = final
- self.uselog = uselog
- self.record = None
- self.homeId = None
-
-
- def label(self, detail):
- return "Cross-pod Migration Sync for {}: {}".format(self.diruid, detail)
-
-
- def accounting(self, logstr):
- emitAccounting(ACCOUNTING_TYPE, self.record, "{} {}\n".format(datetime.datetime.now().isoformat(), logstr), filename=ACCOUNTING_LOG)
- if self.uselog is not None:
- self.uselog.write("CrossPodHomeSync: {}\n".format(logstr))
-
-
- @inlineCallbacks
- def migrateHere(self):
- """
- This is a full, serialized version of a data migration (minus any directory
- update) that can be triggered via a command line tool. It is designed to
- minimize down time for the migrating user.
- """
-
- # Step 1 - initial full sync
- yield self.sync()
-
- # Step 2 - increment sync (since the initial sync may take a long time
- # to run we should do one incremental sync before bringing down the
- # account being migrated)
- yield self.sync()
-
- # Step 3 - disable remote home
- # NB Any failure from this point on will need to be caught and
- # handled by re-enabling the old home (and fixing any sharing state
- # that may have been changed)
- yield self.disableRemoteHome()
-
- # Step 4 - final incremental sync
- yield self.sync()
-
- # Step 5 - final overall sync of meta-data (including sharing re-linking)
- yield self.finalSync()
-
- # Step 6 - enable new home
- yield self.enableLocalHome()
-
- # Step 7 - remove remote home
- yield self.removeRemoteHome()
-
- # Step 8 - say phew! TODO: Actually alert everyone else
- pass
-
-
- @inlineCallbacks
- def sync(self):
- """
- Initiate a sync of the home. This is a simple data sync that does not
- reconcile sharing state etc. The L{finalSync} method will do a full
- sharing reconcile as well as disable the migration source home.
- """
-
- yield self.loadRecord()
- self.accounting("Starting: sync...")
- yield self.prepareCalendarHome()
-
- # Calendar list and calendar data
- yield self.syncCalendarList()
-
- # Sync home metadata such as alarms, default calendars, etc
- yield self.syncCalendarHomeMetaData()
-
- # Sync attachments
- yield self.syncAttachments()
-
- self.accounting("Completed: sync.\n")
-
-
- @inlineCallbacks
- def finalSync(self):
- """
- Do the final sync up of any additional data, re-link sharing bind
- rows, recalculate quota etc.
- """
-
- yield self.loadRecord()
- self.accounting("Starting: finalSync...")
- yield self.prepareCalendarHome()
-
- # Link attachments to resources: ATTACHMENT_CALENDAR_OBJECT table
- yield self.linkAttachments()
-
- # TODO: Re-write attachment URIs - not sure if we need this as reverse proxy may take care of it
- pass
-
- # Group attendee reconcile
- yield self.groupAttendeeReconcile()
-
- # Delegates reconcile
- yield self.delegateReconcile()
-
- # Shared collections reconcile (including group sharees)
- yield self.sharedByCollectionsReconcile()
- yield self.sharedToCollectionsReconcile()
-
- # Notifications
- yield self.notificationsReconcile()
-
- # TODO: work items
- pass
-
- self.accounting("Completed: finalSync.\n")
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def disableRemoteHome(self, txn):
- """
- Mark the remote home as disabled.
- """
-
- yield self.loadRecord()
- self.accounting("Starting: disableRemoteHome...")
- yield self.prepareCalendarHome()
-
- # Calendar home
- remote_home = yield self._remoteHome(txn)
- yield remote_home.setStatus(_HOME_STATUS_DISABLED)
-
- # Notification home
- notifications = yield self._remoteNotificationsHome(txn)
- yield notifications.setStatus(_HOME_STATUS_DISABLED)
-
- self.disabledRemote = True
-
- self.accounting("Completed: disableRemoteHome.\n")
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def enableLocalHome(self, txn):
- """
- Mark the local home as enabled and remove any previously existing external home.
- """
-
- yield self.loadRecord()
- self.accounting("Starting: enableLocalHome...")
- yield self.prepareCalendarHome()
-
- # Disable any local external homes
- oldhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
- if oldhome is not None:
- yield oldhome.setLocalStatus(_HOME_STATUS_DISABLED)
- oldnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
- if oldnotifications:
- yield oldnotifications.setLocalStatus(_HOME_STATUS_DISABLED)
-
- # Enable the migrating ones
- newhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
- if newhome is not None:
- yield newhome.setStatus(_HOME_STATUS_NORMAL)
- newnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
- if newnotifications:
- yield newnotifications.setStatus(_HOME_STATUS_NORMAL)
-
- # TODO: purge the old ones
- pass
-
- self.accounting("Completed: enableLocalHome.\n")
-
-
- @inlineCallbacks
- def removeRemoteHome(self):
- """
- Remove all the old data on the remote pod.
- """
-
- # TODO: implement API on CommonHome to purge the old data without
- # any side-effects (scheduling, sharing etc).
- yield self.loadRecord()
- self.accounting("Starting: removeRemoteHome...")
- yield self.prepareCalendarHome()
-
- self.accounting("Completed: removeRemoteHome.\n")
-
-
- @inlineCallbacks
- def loadRecord(self):
- """
- Initiate a sync of the home.
- """
-
- if self.record is None:
- self.record = yield self.store.directoryService().recordWithUID(self.diruid)
- if self.record is None:
- raise DirectoryRecordNotFoundError("Cross-pod Migration Sync missing directory record for {}".format(self.diruid))
- if self.record.thisServer():
- raise ValueError("Cross-pod Migration Sync cannot sync with user already on this server: {}".format(self.diruid))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def prepareCalendarHome(self, txn):
- """
- Make sure the inactive home to migrate into is present on this pod.
- """
-
- if self.homeId is None:
- home = yield self._localHome(txn)
- if home is None:
- if self.disabledRemote:
- self.homeId = None
- else:
- home = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
- self.accounting(" Created new home collection to migrate into.")
- self.homeId = home.id() if home is not None else None
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def syncCalendarHomeMetaData(self, txn):
- """
- Make sure the home meta-data (alarms, default calendars) is properly sync'd
- """
-
- self.accounting("Starting: syncCalendarHomeMetaData...")
- remote_home = yield self._remoteHome(txn)
- yield remote_home.readMetaData()
-
- calendars = yield CalendarMigrationRecord.querysimple(txn, calendarHomeResourceID=self.homeId)
- calendarIDMap = dict((item.remoteResourceID, item.localResourceID) for item in calendars)
-
- local_home = yield self._localHome(txn)
- yield local_home.copyMetadata(remote_home, calendarIDMap)
-
- self.accounting("Completed: syncCalendarHomeMetaData.")
-
-
- @inlineCallbacks
- def _remoteHome(self, txn):
- """
- Create a synthetic external home object that maps to the actual remote home.
- """
-
- from txdav.caldav.datastore.sql_external import CalendarHomeExternal
- resourceID = yield txn.store().conduit.send_home_resource_id(txn, self.record, migrating=True)
- home = CalendarHomeExternal.makeSyntheticExternalHome(txn, self.record.uid, resourceID) if resourceID is not None else None
- if self.disabledRemote:
- home._migratingHome = True
- returnValue(home)
-
-
- @inlineCallbacks
- def _remoteNotificationsHome(self, txn):
- """
- Create a synthetic external home object that maps to the actual remote home.
- """
-
- notifications = yield NotificationCollectionExternal.notificationsWithUID(txn, self.diruid, create=True)
- if self.disabledRemote:
- notifications._migratingHome = True
- returnValue(notifications)
-
-
- def _localHome(self, txn):
- """
- Get the home on this pod that will have data migrated to it.
- """
-
- return txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
-
-
- @inlineCallbacks
- def syncCalendarList(self):
- """
- Synchronize each owned calendar.
- """
-
- self.accounting("Starting: syncCalendarList...")
-
- # Remote sync details
- remote_sync_state = yield self.getCalendarSyncList()
- self.accounting(" Found {} remote calendars to sync.".format(len(remote_sync_state)))
-
- # Get local sync details from local DB
- local_sync_state = yield self.getSyncState()
- self.accounting(" Found {} local calendars to sync.".format(len(local_sync_state)))
-
- # Remove local calendars no longer on the remote side
- yield self.purgeLocal(local_sync_state, remote_sync_state)
-
- # Sync each calendar that matches on both sides
- for remoteID in remote_sync_state.keys():
- yield self.syncCalendar(remoteID, local_sync_state, remote_sync_state)
-
- self.accounting("Completed: syncCalendarList.")
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def getCalendarSyncList(self, txn):
- """
- Get the names and sync-tokens for each remote owned calendar.
- """
-
- # List of calendars from the remote side
- home = yield self._remoteHome(txn)
- if home is None:
- returnValue(None)
- calendars = yield home.loadChildren()
- results = {}
- for calendar in calendars:
- if calendar.owned():
- sync_token = yield calendar.syncToken()
- results[calendar.id()] = CalendarMigrationRecord.make(
- calendarHomeResourceID=home.id(),
- remoteResourceID=calendar.id(),
- localResourceID=0,
- lastSyncToken=sync_token,
- )
-
- returnValue(results)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def getSyncState(self, txn):
- """
- Get local synchronization state for the home being migrated.
- """
- records = yield CalendarMigrationRecord.querysimple(
- txn, calendarHomeResourceID=self.homeId
- )
- returnValue(dict([(record.remoteResourceID, record) for record in records]))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def updateSyncState(self, txn, stateRecord, newSyncToken):
- """
- Update or insert an L{CalendarMigrationRecord} with the new specified sync token.
- """
- if stateRecord.isnew():
- stateRecord.lastSyncToken = newSyncToken
- yield stateRecord.insert(txn)
- else:
- # The existing stateRecord has a stale txn, but valid column values. We have
- # to duplicate it before we can give it a different txn.
- stateRecord = stateRecord.duplicate()
- stateRecord.transaction = txn
- yield stateRecord.update(lastSyncToken=newSyncToken)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def purgeLocal(self, txn, local_sync_state, remote_sync_state):
- """
- Remove (silently - i.e., no scheduling) local calendars that are no longer on the remote side.
-
- @param txn: transaction to use
- @type txn: L{CommonStoreTransaction}
- @param local_sync_state: local sync state
- @type local_sync_state: L{dict}
- @param remote_sync_state: remote sync state
- @type remote_sync_state: L{dict}
- """
- home = yield self._localHome(txn)
- for localID in set(local_sync_state.keys()) - set(remote_sync_state.keys()):
- calendar = yield home.childWithID(local_sync_state[localID].localResourceID)
- if calendar is not None:
- yield calendar.purge()
- del local_sync_state[localID]
- self.accounting(" Purged calendar local-id={} that no longer exists on the remote pod.".format(localID))
-
-
- @inlineCallbacks
- def syncCalendar(self, remoteID, local_sync_state, remote_sync_state):
- """
- Sync the contents of a calendar from the remote side. The local calendar may need to be created
- on initial sync. Make use of sync tokens to avoid unnecessary work.
-
- @param remoteID: id of the remote calendar to sync
- @type remoteID: L{int}
- @param local_sync_state: local sync state
- @type local_sync_state: L{dict}
- @param remote_sync_state: remote sync state
- @type remote_sync_state: L{dict}
- """
-
- self.accounting("Starting: syncCalendar.")
-
- # See if we need to create the local one first
- if remoteID not in local_sync_state:
- localID = yield self.newCalendar()
- local_sync_state[remoteID] = CalendarMigrationRecord.make(
- calendarHomeResourceID=self.homeId,
- remoteResourceID=remoteID,
- localResourceID=localID,
- lastSyncToken=None,
- )
- self.accounting(" Created new calendar local-id={}, remote-id={}.".format(localID, remoteID))
- else:
- localID = local_sync_state.get(remoteID).localResourceID
- self.accounting(" Updating calendar local-id={}, remote-id={}.".format(localID, remoteID))
- local_record = local_sync_state.get(remoteID)
-
- remote_token = remote_sync_state[remoteID].lastSyncToken
- if local_record.lastSyncToken != remote_token:
- # Sync meta-data such as name, alarms, supported-components, transp, etc
- yield self.syncCalendarMetaData(local_record)
-
- # Sync object resources
- changed, removed = yield self.findObjectsToSync(local_record)
- self.accounting(" Calendar objects changed={}, removed={}.".format(len(changed), len(removed)))
- yield self.purgeDeletedObjectsInBatches(local_record, removed)
- yield self.updateChangedObjectsInBatches(local_record, changed)
-
- yield self.updateSyncState(local_record, remote_token)
- self.accounting("Completed: syncCalendar.")
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def newCalendar(self, txn):
- """
- Create a new local calendar to sync remote data to. We don't care about the name
- of the calendar right now - it will be sync'd later.
- """
-
- home = yield self._localHome(txn)
- calendar = yield home.createChildWithName(str(uuid4()))
- returnValue(calendar.id())
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def syncCalendarMetaData(self, txn, migrationRecord):
- """
- Sync the metadata of a calendar from the remote side.
-
- @param migrationRecord: current migration record
- @type migrationRecord: L{CalendarMigrationRecord}
- """
-
- # Remote changes
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
- if remote_calendar is None:
- returnValue(None)
-
- # Get the local calendar and copy the meta-data across
- local_home = yield self._localHome(txn)
- local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
- yield local_calendar.copyMetadata(remote_calendar)
- self.accounting(" Copied calendar meta-data for calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID}.".format(migrationRecord))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def findObjectsToSync(self, txn, migrationRecord):
- """
- Find the set of object resources that need to be sync'd from the remote
- side and the set that need to be removed locally. Take into account the
- possibility that this is a partial sync and removals or additions might
- be false positives.
-
- @param migrationRecord: current migration record
- @type migrationRecord: L{CalendarMigrationRecord}
- """
-
- # Remote changes
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
- if remote_calendar is None:
- returnValue(None)
- changed, deleted, _ignore_invalid = yield remote_calendar.resourceNamesSinceToken(migrationRecord.lastSyncToken)
-
- # Get the local calendar so the md5 values can be compared
- local_home = yield self._localHome(txn)
- local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
-
- # Check the md5's on each changed remote with the local one to filter out ones
- # we don't actually need to sync
- remote_changes = yield remote_calendar.objectResourcesWithNames(changed)
- remote_changes = dict([(calendar.name(), calendar) for calendar in remote_changes])
-
- local_changes = yield local_calendar.objectResourcesWithNames(changed)
- local_changes = dict([(calendar.name(), calendar) for calendar in local_changes])
-
- actual_changes = []
- for name, calendar in remote_changes.items():
- if name not in local_changes or remote_changes[name].md5() != local_changes[name].md5():
- actual_changes.append(name)
-
- returnValue((actual_changes, deleted,))
-
-
- @inlineCallbacks
- def purgeDeletedObjectsInBatches(self, migrationRecord, deleted):
- """
- Purge (silently remove) the specified object resources. This needs to
- succeed in the case where some or all resources have already been deleted.
- Do this in batches to keep transaction times small.
-
- @param migrationRecord: local calendar migration record
- @type migrationRecord: L{CalendarMigrationRecord}
- @param deleted: list of names to purge
- @type deleted: L{list} of L{str}
- """
-
- remaining = list(deleted)
- while remaining:
- yield self.purgeBatch(migrationRecord.localResourceID, remaining[:self.BATCH_SIZE])
- del remaining[:self.BATCH_SIZE]
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def purgeBatch(self, txn, localID, purge_names):
- """
- Purge a bunch of object resources from the specified calendar.
-
- @param txn: transaction to use
- @type txn: L{CommonStoreTransaction}
- @param localID: id of the local calendar to sync
- @type localID: L{int}
- @param purge_names: object resource names to purge
- @type purge_names: L{list} of L{str}
- """
-
- # Load the local objects that are to be purged
- local_home = yield self._localHome(txn)
- local_calendar = yield local_home.childWithID(localID)
- local_objects = yield local_calendar.objectResourcesWithNames(purge_names)
-
- for local_object in local_objects:
- yield local_object.purge()
- self.accounting(" Purged calendar object local-id={}.".format(local_object.id()))
-
-
- @inlineCallbacks
- def updateChangedObjectsInBatches(self, migrationRecord, changed):
- """
- Update the specified object resources. This needs to succeed in the
- case where some or all resources have already been deleted.
- Do this in batches to keep transaction times small.
-
- @param migrationRecord: local calendar migration record
- @type migrationRecord: L{CalendarMigrationRecord}
- @param changed: list of names to update
- @type changed: L{list} of L{str}
- """
-
- remaining = list(changed)
- while remaining:
- yield self.updateBatch(
- migrationRecord.localResourceID,
- migrationRecord.remoteResourceID,
- remaining[:self.BATCH_SIZE],
- )
- del remaining[:self.BATCH_SIZE]
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def updateBatch(self, txn, localID, remoteID, remaining):
- """
- Update a bunch of object resources from the specified remote calendar.
-
- @param txn: transaction to use
- @type txn: L{CommonStoreTransaction}
- @param localID: id of the local calendar to sync
- @type localID: L{int}
- @param remoteID: id of the remote calendar to sync with
- @type remoteID: L{int}
- @param remaining: object resource names to update
- @type remaining: L{list} of L{str}
- """
-
- # Get remote objects
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(remoteID)
- if remote_calendar is None:
- returnValue(None)
- remote_objects = yield remote_calendar.objectResourcesWithNames(remaining)
- remote_objects = dict([(obj.name(), obj) for obj in remote_objects])
-
- # Get local objects
- local_home = yield self._localHome(txn)
- local_calendar = yield local_home.childWithID(localID)
- local_objects = yield local_calendar.objectResourcesWithNames(remaining)
- local_objects = dict([(obj.name(), obj) for obj in local_objects])
-
- # Sync ones that still exist - use txn._migrating together with stuffing the remote md5
- # value onto the component being stored to ensure that the md5 value stored locally
- # matches the remote one (which should help reduce the need for a client to resync
- # the data when moved from one pod to the other).
- txn._migrating = True
- for obj_name in remote_objects.keys():
- remote_object = remote_objects[obj_name]
- remote_data = yield remote_object.component()
- remote_data.md5 = remote_object.md5()
- if obj_name in local_objects:
- local_object = yield local_objects[obj_name]
- yield local_object._setComponentInternal(remote_data, internal_state=ComponentUpdateState.RAW)
- del local_objects[obj_name]
- log_op = "Updated"
- else:
- local_object = yield local_calendar._createCalendarObjectWithNameInternal(obj_name, remote_data, internal_state=ComponentUpdateState.RAW)
-
- # Maintain the mapping from the remote to local id. Note that this mapping never changes as the ids on both
- # sides are immutable - though it may get deleted if the local object is removed during sync (via a cascade).
- yield CalendarObjectMigrationRecord.create(
- txn,
- calendarHomeResourceID=self.homeId,
- remoteResourceID=remote_object.id(),
- localResourceID=local_object.id()
- )
- log_op = "Created"
-
- # Sync meta-data such as schedule object, schedule tags, access mode etc
- yield local_object.copyMetadata(remote_object)
- self.accounting(" {} calendar object local-id={}, remote-id={}.".format(log_op, local_object.id(), remote_object.id()))
-
- # Purge the ones that remain
- for local_object in local_objects.values():
- yield local_object.purge()
- self.accounting(" Purged calendar object local-id={}.".format(local_object.id()))
-
-
- @inlineCallbacks
- def syncAttachments(self):
- """
- Sync attachments (both metadata and actual attachment data) for the home being migrated.
- """
-
- self.accounting("Starting: syncAttachments...")
-
- # Two steps - sync the table first in one txn, then sync each attachment's data
- changed_ids, removed_ids = yield self.syncAttachmentTable()
- self.accounting(" Attachments changed={}, removed={}".format(len(changed_ids), len(removed_ids)))
-
- for local_id in changed_ids:
- yield self.syncAttachmentData(local_id)
-
- self.accounting("Completed: syncAttachments.")
-
- returnValue((changed_ids, removed_ids,))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def syncAttachmentTable(self, txn):
- """
- Sync the ATTACHMENT table data for the home being migrated. Return the list of local attachment ids that
- now need their attachment data sync'd from the server.
- """
-
- remote_home = yield self._remoteHome(txn)
- rattachments = yield remote_home.getAllAttachments()
- rmap = dict([(attachment.id(), attachment) for attachment in rattachments])
-
- local_home = yield self._localHome(txn)
- lattachments = yield local_home.getAllAttachments()
- lmap = dict([(attachment.id(), attachment) for attachment in lattachments])
-
- # Figure out the differences
- records = yield AttachmentMigrationRecord.querysimple(
- txn, calendarHomeResourceID=self.homeId
- )
- mapping = dict([(record.remoteResourceID, record) for record in records])
-
- # Removed - remove attachment and migration state
- removed = set(mapping.keys()) - set(rmap.keys())
- for remove_id in removed:
- record = mapping[remove_id]
- att = yield ManagedAttachment.load(txn, None, None, attachmentID=record.localResourceID)
- if att:
- yield att.remove(adjustQuota=False)
- else:
- yield record.delete()
-
- # Track which ones need attachment data sync'd over
- data_ids = set()
-
- # Added - add new attachment and migration state
- added = set(rmap.keys()) - set(mapping.keys())
- for added_id in added:
- attachment = yield ManagedAttachment._create(txn, None, self.homeId)
- yield AttachmentMigrationRecord.create(
- txn,
- calendarHomeResourceID=self.homeId,
- remoteResourceID=added_id,
- localResourceID=attachment.id(),
- )
- data_ids.add(attachment.id())
-
- # Possible updates - check for md5 change and sync
- updates = set(mapping.keys()) & set(rmap.keys())
- for updated_id in updates:
- local_id = mapping[updated_id].localResourceID
- if rmap[updated_id].md5() != lmap[local_id].md5():
- yield lmap[local_id].copyRemote(rmap[updated_id])
- data_ids.add(local_id)
-
- returnValue((data_ids, removed,))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def syncAttachmentData(self, txn, local_id):
- """
- Sync the attachment data for the home being migrated.
- """
-
- remote_home = yield self._remoteHome(txn)
- local_home = yield self._localHome(txn)
- attachment = yield local_home.getAttachmentByID(local_id)
- if attachment is None:
- returnValue(None)
-
- records = yield AttachmentMigrationRecord.querysimple(
- txn, calendarHomeResourceID=self.homeId, localResourceID=local_id
- )
- if records:
- # Read the data from the conduit
- yield remote_home.readAttachmentData(records[0].remoteResourceID, attachment)
- self.accounting(" Read attachment local-id={0.localResourceID}, remote-id={0.remoteResourceID}".format(records[0]))
-
-
- @inlineCallbacks
- def linkAttachments(self):
- """
- Link attachments to the calendar objects they belong to.
- """
-
- self.accounting("Starting: linkAttachments...")
-
- # Get the map of links for the remote home
- links = yield self.getAttachmentLinks()
- self.accounting(" Linking {} attachments".format(len(links)))
-
- # Get remote->local ID mappings
- attachmentIDMap, objectIDMap = yield self.getAttachmentMappings()
-
- # Batch setting links for the local home
- len_links = len(links)
- while links:
- yield self.makeAttachmentLinks(links[:50], attachmentIDMap, objectIDMap)
- links = links[50:]
-
- self.accounting("Completed: linkAttachments.")
-
- returnValue(len_links)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def getAttachmentLinks(self, txn):
- """
- Get the remote link information.
- """
-
- # Get the map of links for the remote home
- remote_home = yield self._remoteHome(txn)
- links = yield remote_home.getAttachmentLinks()
- returnValue(links)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def getAttachmentMappings(self, txn):
- """
- Get the remote-to-local ID mappings for attachments and calendar objects.
- """
-
- # Get migration mappings
- records = yield AttachmentMigrationRecord.querysimple(
- txn, calendarHomeResourceID=self.homeId
- )
- attachmentIDMap = dict([(record.remoteResourceID, record) for record in records])
-
- records = yield CalendarObjectMigrationRecord.querysimple(
- txn, calendarHomeResourceID=self.homeId
- )
- objectIDMap = dict([(record.remoteResourceID, record) for record in records])
-
- returnValue((attachmentIDMap, objectIDMap,))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def makeAttachmentLinks(self, txn, links, attachmentIDMap, objectIDMap):
- """
- Map remote links to local links.
- """
-
- for link in links:
- # Remote link has an invalid txn at this point so replace that first
- link._txn = txn
-
- # Now re-map the attachment ID and calendar_object_id to the local ones
- link._attachmentID = attachmentIDMap[link._attachmentID].localResourceID
- link._calendarObjectID = objectIDMap[link._calendarObjectID].localResourceID
-
- yield link.insert()
-
-
- @inlineCallbacks
- def delegateReconcile(self):
- """
- Sync the delegate assignments from the remote home to the local home. We won't use
- a fake directory UID locally.
- """
-
- self.accounting("Starting: delegateReconcile...")
-
- yield self.individualDelegateReconcile()
- yield self.groupDelegateReconcile()
- yield self.externalDelegateReconcile()
-
- self.accounting("Completed: delegateReconcile.")
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def individualDelegateReconcile(self, txn):
- """
- Sync the delegate assignments from the remote home to the local home. We won't use
- a fake directory UID locally.
- """
- remote_records = yield txn.dumpIndividualDelegatesExternal(self.record)
- for record in remote_records:
- yield record.insert(txn)
-
- self.accounting(" Found {} individual delegates".format(len(remote_records)))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def groupDelegateReconcile(self, txn):
- """
- Sync the delegate assignments from the remote home to the local home. We won't use
- a fake directory UID locally.
- """
- remote_records = yield txn.dumpGroupDelegatesExternal(self.record)
- for delegator, group in remote_records:
- # We need to make sure the group exists locally first and map the groupID to the local one
- local_group = yield txn.groupByUID(group.groupUID)
- delegator.groupID = local_group.groupID
- yield delegator.insert(txn)
-
- self.accounting(" Found {} group delegates".format(len(remote_records)))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def externalDelegateReconcile(self, txn):
- """
- Sync the external delegate assignments from the remote home to the local home. We won't use
- a fake directory UID locally.
- """
- remote_records = yield txn.dumpExternalDelegatesExternal(self.record)
- for record in remote_records:
- yield record.insert(txn)
-
- self.accounting(" Found {} external delegates".format(len(remote_records)))
-
-
- @inlineCallbacks
- def groupAttendeeReconcile(self):
- """
- Sync the remote group attendee links to the local store.
- """
-
- self.accounting("Starting: groupAttendeeReconcile...")
-
- # Get remote data and local mapping information
- remote_group_attendees, objectIDMap = yield self.groupAttendeeData()
- self.accounting(" Found {} group attendees".format(len(remote_group_attendees)))
-
- # Map each result to a local resource (in batches)
- number_of_links = len(remote_group_attendees)
- while remote_group_attendees:
- yield self.groupAttendeeProcess(remote_group_attendees[:50], objectIDMap)
- remote_group_attendees = remote_group_attendees[50:]
-
- self.accounting("Completed: groupAttendeeReconcile.")
-
- returnValue(number_of_links)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def groupAttendeeData(self, txn):
- """
- Fetch the remote group attendee records and the remote->local calendar object ID map.
- """
- remote_home = yield self._remoteHome(txn)
- remote_group_attendees = yield remote_home.getAllGroupAttendees()
-
- # Get all remote->local object maps
- records = yield CalendarObjectMigrationRecord.querysimple(
- txn, calendarHomeResourceID=self.homeId
- )
- objectIDMap = dict([(record.remoteResourceID, record.localResourceID) for record in records])
-
- returnValue((remote_group_attendees, objectIDMap,))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def groupAttendeeProcess(self, txn, results, objectIDMap):
- """
- Sync the remote group attendee links to the local store.
- """
- # Map each result to a local resource
- for groupAttendee, group in results:
- local_group = yield txn.groupByUID(group.groupUID)
- groupAttendee.groupID = local_group.groupID
- try:
- groupAttendee.resourceID = objectIDMap[groupAttendee.resourceID]
- except KeyError:
- continue
- yield groupAttendee.insert(txn)
-
-
- @inlineCallbacks
- def notificationsReconcile(self):
- """
- Sync all the existing L{NotificationObject} resources from the remote store.
- """
-
- self.accounting("Starting: notificationsReconcile...")
- records = yield self.notificationRecords()
- self.accounting(" Found {} notifications".format(len(records)))
-
- # Batch setting resources for the local home
- len_records = len(records)
- while records:
- yield self.makeNotifications(records[:50])
- records = records[50:]
-
- self.accounting("Completed: notificationsReconcile.")
-
- returnValue(len_records)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def notificationRecords(self, txn):
- """
- Get all the existing L{NotificationObjectRecord}'s from the remote store.
- """
-
- notifications = yield self._remoteNotificationsHome(txn)
- records = yield notifications.notificationObjectRecords()
- for record in records:
- # This needs to be reset when added to the local store
- del record.resourceID
-
- # Map the remote id to the local one.
- record.notificationHomeResourceID = notifications.id()
-
- returnValue(records)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def makeNotifications(self, txn, records):
- """
- Create L{NotificationObjectRecord} records in the local store.
- """
-
- notifications = yield NotificationCollection.notificationsWithUID(txn, self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
- for record in records:
- # Do this via the "write" API so that sync revisions are updated properly, rather than just
- # inserting the records directly.
- notification = yield notifications.writeNotificationObject(record.notificationUID, record.notificationType, record.notificationData)
- self.accounting(" Added notification local-id={}.".format(notification.id()))
-
-
- @inlineCallbacks
- def sharedByCollectionsReconcile(self):
- """
- Sync all the collections shared by the migrating user from the remote store. We will do this one calendar at a time since
- there could be a large number of sharees per calendar.
-
- Here is the logic we need: first assume we have three pods: A, B, C, and we are migrating a user from A->B. We start
- with a set of shares (X -> Y - where X is the sharer and Y the sharee) on pod A. We migrate the sharer to pod B. We
- then need to have a set of bind records on pod B, and adjust the set on pod A. Note that no changes are required on pod C.
-
- Original | Changes | Changes
- Shares | on B | on A
- --------------|------------------------------|---------------------
- A -> A | B -> A (new) | B -> A (modify existing)
- A -> B | B -> B (modify existing) | (removed)
- A -> C | B -> C (new) | (removed)
- """
-
- self.accounting("Starting: sharedByCollectionsReconcile...")
- calendars = yield self.getSyncState()
-
- len_records = 0
- for calendar in calendars.values():
- records, bindUID = yield self.sharedByCollectionRecords(calendar.remoteResourceID, calendar.localResourceID)
- if not records:
- continue
- records = records.items()
-
- self.accounting(" Found shared by calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID} with {1} sharees".format(
- calendar, len(records),
- ))
-
- # Batch setting resources for the local home
- len_records += len(records)
- while records:
- yield self.makeSharedByCollections(records[:50], calendar.localResourceID)
- records = records[50:]
-
- # Get groups from remote pod
- yield self.syncGroupSharees(calendar.remoteResourceID, calendar.localResourceID)
-
- # Update the remote pod to switch over the shares
- yield self.updatedRemoteSharedByCollections(calendar.remoteResourceID, bindUID)
-
- self.accounting("Completed: sharedByCollectionsReconcile.")
-
- returnValue(len_records)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def sharedByCollectionRecords(self, txn, remote_id, local_id):
- """
- Get all the existing L{CalendarBindRecord}'s from the remote store. Also make sure a
- bindUID exists for the local calendar.
- """
-
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(remote_id)
- records = yield remote_calendar.sharingBindRecords()
-
- # Check bindUID
- local_records = yield CalendarBindRecord.querysimple(
- txn,
- calendarHomeResourceID=self.homeId,
- calendarResourceID=local_id,
- )
- if records and not local_records[0].bindUID:
- yield local_records[0].update(bindUID=str(uuid4()))
-
- returnValue((records, local_records[0].bindUID,))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def makeSharedByCollections(self, txn, records, calendar_id):
- """
- Create L{CalendarBindRecord} records in the local store.
- """
-
- for shareeUID, record in records:
- shareeHome = yield txn.calendarHomeWithUID(shareeUID, create=True)
-
- # First look for an existing record that could be present if the migrating user had
- # previously shared with this sharee as a cross-pod share
- oldrecord = yield CalendarBindRecord.querysimple(
- txn,
- calendarHomeResourceID=shareeHome.id(),
- calendarResourceName=record.calendarResourceName,
- )
-
- # FIXME: need to figure out sync-token and bind revision changes
-
- if oldrecord:
- # Point old record to the new local calendar being shared
- yield oldrecord[0].update(
- calendarResourceID=calendar_id,
- bindRevision=0,
- )
- self.accounting(" Updating existing sharee {}".format(shareeHome.uid()))
- else:
- # Map the record resource ids and insert a new record
- record.calendarHomeResourceID = shareeHome.id()
- record.calendarResourceID = calendar_id
- record.bindRevision = 0
- yield record.insert(txn)
- self.accounting(" Adding new sharee {}".format(shareeHome.uid()))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def syncGroupSharees(self, txn, remote_id, local_id):
- """
- Sync the group sharees for a remote share.
- """
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(remote_id)
- results = yield remote_calendar.groupSharees()
- groups = dict([(group.groupID, group.groupUID,) for group in results["groups"]])
- for share in results["sharees"]:
- local_group = yield txn.groupByUID(groups[share.groupID])
- share.groupID = local_group.groupID
- share.calendarID = local_id
- yield share.insert(txn)
- self.accounting(" Adding group sharee {}".format(local_group.groupUID))
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def updatedRemoteSharedByCollections(self, txn, remote_id, bindUID):
- """
- Adjust the existing L{CalendarBindRecord}'s on the remote store now that the sharer's calendar has migrated to this pod.
- """
-
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(remote_id)
- records = yield remote_calendar.migrateBindRecords(bindUID)
- self.accounting(" Updating remote records")
- returnValue(records)
-
-
- @inlineCallbacks
- def sharedToCollectionsReconcile(self):
- """
- Sync all the collections shared to the migrating user from the remote store.
-
- Here is the logic we need: first assume we have three pods: A, B, C, and we are migrating a user from A->B. We start
- with a set of shares (X -> Y - where X is the sharer and Y the sharee) with the sharee on pod A. We migrate the sharee to pod B. We
- then need to have a set of bind records on pod B, and adjust the set on pod A. Note that no changes are required on pod C.
-
- Original | Changes | Changes
- Shares | on B | on A
- --------------|------------------------------|---------------------
- A -> A | A -> B (new) | A -> B (modify existing)
- B -> A | B -> B (modify existing) | (removed)
- C -> A | C -> B (new) | (removed)
- """
-
- self.accounting("Starting: sharedToCollectionsReconcile...")
-
- records = yield self.sharedToCollectionRecords()
- records = records.items()
- len_records = len(records)
- self.accounting(" Found {} shared to collections".format(len_records))
-
- while records:
- yield self.makeSharedToCollections(records[:50])
- records = records[50:]
-
- self.accounting("Completed: sharedToCollectionsReconcile.")
-
- returnValue(len_records)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def sharedToCollectionRecords(self, txn):
- """
- Get the names and sharer UIDs for remote shared calendars.
- """
-
- # List of calendars from the remote side
- home = yield self._remoteHome(txn)
- if home is None:
- returnValue(None)
- results = yield home.sharedToBindRecords()
- returnValue(results)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def makeSharedToCollections(self, txn, records):
- """
- Create L{CalendarBindRecord} records in the local store.
- """
-
- for sharerUID, (shareeRecord, ownerRecord, metadataRecord) in records:
- sharerHome = yield txn.calendarHomeWithUID(sharerUID, create=True)
-
- # We need to figure out the right thing to do based on whether the sharer is local to this pod
- # (the one where the migrated user will be hosted) vs located on another pod
-
- if sharerHome.normal():
- # First look for an existing record that must be present if the migrating user had
- # previously been shared with by this sharer
- oldrecord = yield CalendarBindRecord.querysimple(
- txn,
- calendarResourceName=shareeRecord.calendarResourceName,
- )
- if len(oldrecord) == 1:
- # Point old record to the new local calendar home
- yield oldrecord[0].update(
- calendarHomeResourceID=self.homeId,
- )
- self.accounting(" Updated existing local sharer record {}".format(sharerHome.uid()))
- else:
- raise AssertionError("An existing share must be present")
- else:
- # We have an external user. That sharer may have already shared the calendar with some other user
- # on this pod, in which case there is already a CALENDAR table entry for it, and we need the
- # resource ID from that to use in the new CALENDAR_BIND record we create. If a pre-existing share
- # is not present, then we have to create the CALENDAR table entry and associated pieces
-
- remote_id = shareeRecord.calendarResourceID
-
- # Look for pre-existing share with the same external ID
- oldrecord = yield CalendarBindRecord.querysimple(
- txn,
- calendarHomeResourceID=sharerHome.id(),
- bindUID=ownerRecord.bindUID,
- )
- if oldrecord:
- # Map the record resource ids and insert a new record
- calendar_id = oldrecord[0].calendarResourceID
- log_op = "Updated"
- else:
- sharerView = yield sharerHome.createCollectionForExternalShare(
- ownerRecord.calendarResourceName,
- ownerRecord.bindUID,
- metadataRecord.supportedComponents,
- )
- calendar_id = sharerView.id()
- log_op = "Created"
-
- shareeRecord.calendarHomeResourceID = self.homeId
- shareeRecord.calendarResourceID = calendar_id
- shareeRecord.bindRevision = 0
- yield shareeRecord.insert(txn)
- self.accounting(" {} remote sharer record {}".format(log_op, sharerHome.uid()))
-
- yield self.updatedRemoteSharedToCollection(remote_id, txn=txn)
-
-
- @inTransactionWrapper
- @inlineCallbacks
- def updatedRemoteSharedToCollection(self, txn, remote_id):
- """
- Adjust the existing L{CalendarBindRecord}'s on the remote store now that the sharee has migrated to this pod.
- """
-
- remote_home = yield self._remoteHome(txn)
- remote_calendar = yield remote_home.childWithID(remote_id)
- records = yield remote_calendar.migrateBindRecords(None)
- self.accounting(" Updating remote records")
- returnValue(records)
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationhome_syncpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationhome_syncpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1353 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from functools import wraps
+
+from twext.python.log import Logger
+from twisted.internet.defer import returnValue, inlineCallbacks
+from twisted.python.failure import Failure
+from twistedcaldav.accounting import emitAccounting
+from txdav.caldav.icalendarstore import ComponentUpdateState
+from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
+ CalendarObjectMigrationRecord, AttachmentMigrationRecord
+from txdav.caldav.datastore.sql import ManagedAttachment, CalendarBindRecord
+from txdav.common.datastore.sql_external import NotificationCollectionExternal
+from txdav.common.datastore.sql_notification import NotificationCollection
+from txdav.common.datastore.sql_tables import _HOME_STATUS_MIGRATING, _HOME_STATUS_DISABLED, \
+ _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+
+from uuid import uuid4
+import datetime
+
+log = Logger()
+
+ACCOUNTING_TYPE = "migration"
+ACCOUNTING_LOG = "migration.log"
+
+def inTransactionWrapper(operation):
+ """
+ This wrapper converts an instance method that takes a transaction as its
+ first parameter into one where the transaction parameter is an optional
+ keyword argument. If the keyword argument is present and not None, then
+ the instance method is called with that keyword as the first positional
+ argument (i.e., almost a NoOp). If the keyword argument is not present,
+ then a new transaction is created and the instance method called with
+ it as the first positional argument, plus the call is wrapped with
+ try/except/else to ensure that the internally created transaction is properly
+ committed or aborted.
+
+ So this wrapper allows for a method that requires a transaction to be run
+ with either an existing transaction or one created just for the purpose
+ of running it.
+
+ @param operation: a callable that takes an L{IAsyncTransaction} as its first
+ argument, and returns a value.
+ """
+
+ @wraps(operation)
+ @inlineCallbacks
+ def _inTxn(self, *args, **kwargs):
+ label = self.label(operation.__name__)
+ if "txn" in kwargs:
+ txn = kwargs["txn"]
+ del kwargs["txn"]
+ result = yield operation(self, txn, *args, **kwargs)
+ returnValue(result)
+ else:
+ txn = self.store.newTransaction(label=label)
+ try:
+ result = yield operation(self, txn, *args, **kwargs)
+ except Exception as ex:
+ f = Failure()
+ yield txn.abort()
+ log.error("{label} failed: {e}".format(label=label, e=str(ex)))
+ returnValue(f)
+ else:
+ yield txn.commit()
+ returnValue(result)
+
+ return _inTxn
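+
+# Illustrative usage sketch (hypothetical caller; assumes a CrossPodHomeSync
+# instance named "syncer" and its data store): a method decorated with
+# inTransactionWrapper can either reuse a caller-supplied transaction or let
+# the wrapper create, commit and (on error) abort one of its own.
+#
+#     txn = store.newTransaction(label="example")
+#     state = yield syncer.getSyncState(txn=txn)   # reuses the caller's txn
+#     yield txn.commit()
+#     state = yield syncer.getSyncState()          # wrapper manages its own txn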
+
+
+
+# Cross-pod synchronization of an entire calendar home
+class CrossPodHomeSync(object):
+
+ BATCH_SIZE = 50
+
+ def __init__(self, store, diruid, final=False, uselog=None):
+ """
+ @param store: the data store
+ @type store: L{CommonDataStore}
+ @param diruid: directory uid of the user whose home is to be sync'd
+ @type diruid: L{str}
+ @param final: indicates whether this is in the final sync stage with the remote home
+ already disabled
+ @type final: L{bool}
+ @param uselog: additional logging written to this object
+ @type uselog: L{File}
+ """
+
+ self.store = store
+ self.diruid = diruid
+ self.disabledRemote = final
+ self.uselog = uselog
+ self.record = None
+ self.homeId = None
+
+
+ def label(self, detail):
+ return "Cross-pod Migration Sync for {}: {}".format(self.diruid, detail)
+
+
+ def accounting(self, logstr):
+ emitAccounting(ACCOUNTING_TYPE, self.record, "{} {}\n".format(datetime.datetime.now().isoformat(), logstr), filename=ACCOUNTING_LOG)
+ if self.uselog is not None:
+ self.uselog.write("CrossPodHomeSync: {}\n".format(logstr))
+
+
+ @inlineCallbacks
+ def migrateHere(self):
+ """
+ This is a full, serialized version of a data migration (minus any directory
+ update) that can be triggered via a command line tool. It is designed to
+ minimize down time for the migrating user.
+ """
+
+ # Step 1 - initial full sync
+ yield self.sync()
+
+ # Step 2 - incremental sync (since the initial sync may take a long time
+ # to run we should do one incremental sync before bringing down the
+ # account being migrated)
+ yield self.sync()
+
+ # Step 3 - disable remote home
+ # NB Any failure from this point on will need to be caught and
+ # handled by re-enabling the old home (and fixing any sharing state
+ # that may have been changed)
+ yield self.disableRemoteHome()
+
+ # Step 4 - final incremental sync
+ yield self.sync()
+
+ # Step 5 - final overall sync of meta-data (including sharing re-linking)
+ yield self.finalSync()
+
+ # Step 6 - enable new home
+ yield self.enableLocalHome()
+
+ # Step 7 - remove remote home
+ yield self.removeRemoteHome()
+
+ # Step 8 - say phew! TODO: Actually alert everyone else
+ pass
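+
+ # Minimal driver sketch (hypothetical; assumes an already configured store and
+ # an inlineCallbacks context such as a command line tool):
+ #
+ #     syncer = CrossPodHomeSync(store, "user01-directory-uid", uselog=sys.stdout)
+ #     yield syncer.migrateHere()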
+
+
+ @inlineCallbacks
+ def sync(self):
+ """
+ Initiate a sync of the home. This is a simple data sync that does not
+ reconcile sharing state etc. The L{finalSync} method will do a full
+ sharing reconcile as well as disable the migration source home.
+ """
+
+ yield self.loadRecord()
+ self.accounting("Starting: sync...")
+ yield self.prepareCalendarHome()
+
+ # Calendar list and calendar data
+ yield self.syncCalendarList()
+
+ # Sync home metadata such as alarms, default calendars, etc
+ yield self.syncCalendarHomeMetaData()
+
+ # Sync attachments
+ yield self.syncAttachments()
+
+ self.accounting("Completed: sync.\n")
+
+
+ @inlineCallbacks
+ def finalSync(self):
+ """
+ Do the final sync up of any additional data, re-link sharing bind
+ rows, recalculate quota etc.
+ """
+
+ yield self.loadRecord()
+ self.accounting("Starting: finalSync...")
+ yield self.prepareCalendarHome()
+
+ # Link attachments to resources: ATTACHMENT_CALENDAR_OBJECT table
+ yield self.linkAttachments()
+
+ # TODO: Re-write attachment URIs - not sure if we need this as reverse proxy may take care of it
+ pass
+
+ # Group attendee reconcile
+ yield self.groupAttendeeReconcile()
+
+ # Delegates reconcile
+ yield self.delegateReconcile()
+
+ # Shared collections reconcile (including group sharees)
+ yield self.sharedByCollectionsReconcile()
+ yield self.sharedToCollectionsReconcile()
+
+ # Notifications
+ yield self.notificationsReconcile()
+
+ # TODO: work items
+ pass
+
+ self.accounting("Completed: finalSync.\n")
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def disableRemoteHome(self, txn):
+ """
+ Mark the remote home as disabled.
+ """
+
+ yield self.loadRecord()
+ self.accounting("Starting: disableRemoteHome...")
+ yield self.prepareCalendarHome()
+
+ # Calendar home
+ remote_home = yield self._remoteHome(txn)
+ yield remote_home.setStatus(_HOME_STATUS_DISABLED)
+
+ # Notification home
+ notifications = yield self._remoteNotificationsHome(txn)
+ yield notifications.setStatus(_HOME_STATUS_DISABLED)
+
+ self.disabledRemote = True
+
+ self.accounting("Completed: disableRemoteHome.\n")
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def enableLocalHome(self, txn):
+ """
+ Mark the local home as enabled and remove any previously existing external home.
+ """
+
+ yield self.loadRecord()
+ self.accounting("Starting: enableLocalHome...")
+ yield self.prepareCalendarHome()
+
+ # Disable any local external homes
+ oldhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
+ if oldhome is not None:
+ yield oldhome.setLocalStatus(_HOME_STATUS_DISABLED)
+ oldnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_EXTERNAL)
+ if oldnotifications:
+ yield oldnotifications.setLocalStatus(_HOME_STATUS_DISABLED)
+
+ # Enable the migrating ones
+ newhome = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
+ if newhome is not None:
+ yield newhome.setStatus(_HOME_STATUS_NORMAL)
+ newnotifications = yield txn.notificationsWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
+ if newnotifications:
+ yield newnotifications.setStatus(_HOME_STATUS_NORMAL)
+
+ # TODO: purge the old ones
+ pass
+
+ self.accounting("Completed: enableLocalHome.\n")
+
+
+ @inlineCallbacks
+ def removeRemoteHome(self):
+ """
+ Remove all the old data on the remote pod.
+ """
+
+ # TODO: implement API on CommonHome to purge the old data without
+ # any side-effects (scheduling, sharing etc).
+ yield self.loadRecord()
+ self.accounting("Starting: removeRemoteHome...")
+ yield self.prepareCalendarHome()
+
+ self.accounting("Completed: removeRemoteHome.\n")
+
+
+ @inlineCallbacks
+ def loadRecord(self):
+ """
+ Initiate a sync of the home.
+ """
+
+ if self.record is None:
+ self.record = yield self.store.directoryService().recordWithUID(self.diruid)
+ if self.record is None:
+ raise DirectoryRecordNotFoundError("Cross-pod Migration Sync missing directory record for {}".format(self.diruid))
+ if self.record.thisServer():
+ raise ValueError("Cross-pod Migration Sync cannot sync with user already on this server: {}".format(self.diruid))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def prepareCalendarHome(self, txn):
+ """
+ Make sure the inactive home to migrate into is present on this pod.
+ """
+
+ if self.homeId is None:
+ home = yield self._localHome(txn)
+ if home is None:
+ if self.disabledRemote:
+ self.homeId = None
+ else:
+ home = yield txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
+ self.accounting(" Created new home collection to migrate into.")
+ self.homeId = home.id() if home is not None else None
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def syncCalendarHomeMetaData(self, txn):
+ """
+ Make sure the home meta-data (alarms, default calendars) is properly sync'd
+ """
+
+ self.accounting("Starting: syncCalendarHomeMetaData...")
+ remote_home = yield self._remoteHome(txn)
+ yield remote_home.readMetaData()
+
+ calendars = yield CalendarMigrationRecord.querysimple(txn, calendarHomeResourceID=self.homeId)
+ calendarIDMap = dict((item.remoteResourceID, item.localResourceID) for item in calendars)
+
+ local_home = yield self._localHome(txn)
+ yield local_home.copyMetadata(remote_home, calendarIDMap)
+
+ self.accounting("Completed: syncCalendarHomeMetaData.")
+
+
+ @inlineCallbacks
+ def _remoteHome(self, txn):
+ """
+ Create a synthetic external home object that maps to the actual remote home.
+ """
+
+ from txdav.caldav.datastore.sql_external import CalendarHomeExternal
+ resourceID = yield txn.store().conduit.send_home_resource_id(txn, self.record, migrating=True)
+ home = CalendarHomeExternal.makeSyntheticExternalHome(txn, self.record.uid, resourceID) if resourceID is not None else None
+ if self.disabledRemote:
+ home._migratingHome = True
+ returnValue(home)
+
+
+ @inlineCallbacks
+ def _remoteNotificationsHome(self, txn):
+ """
+ Create a synthetic external object that maps to the actual remote notifications home.
+ """
+
+ notifications = yield NotificationCollectionExternal.notificationsWithUID(txn, self.diruid, create=True)
+ if self.disabledRemote:
+ notifications._migratingHome = True
+ returnValue(notifications)
+
+
+ def _localHome(self, txn):
+ """
+ Get the home on this pod that will have data migrated to it.
+ """
+
+ return txn.calendarHomeWithUID(self.diruid, status=_HOME_STATUS_MIGRATING)
+
+
+ @inlineCallbacks
+ def syncCalendarList(self):
+ """
+ Synchronize each owned calendar.
+ """
+
+ self.accounting("Starting: syncCalendarList...")
+
+ # Remote sync details
+ remote_sync_state = yield self.getCalendarSyncList()
+ self.accounting(" Found {} remote calendars to sync.".format(len(remote_sync_state)))
+
+ # Get local sync details from local DB
+ local_sync_state = yield self.getSyncState()
+ self.accounting(" Found {} local calendars to sync.".format(len(local_sync_state)))
+
+ # Remove local calendars no longer on the remote side
+ yield self.purgeLocal(local_sync_state, remote_sync_state)
+
+ # Sync each calendar that matches on both sides
+ for remoteID in remote_sync_state.keys():
+ yield self.syncCalendar(remoteID, local_sync_state, remote_sync_state)
+
+ self.accounting("Completed: syncCalendarList.")
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def getCalendarSyncList(self, txn):
+ """
+ Get the names and sync-tokens for each remote owned calendar.
+ """
+
+ # List of calendars from the remote side
+ home = yield self._remoteHome(txn)
+ if home is None:
+ returnValue(None)
+ calendars = yield home.loadChildren()
+ results = {}
+ for calendar in calendars:
+ if calendar.owned():
+ sync_token = yield calendar.syncToken()
+ results[calendar.id()] = CalendarMigrationRecord.make(
+ calendarHomeResourceID=home.id(),
+ remoteResourceID=calendar.id(),
+ localResourceID=0,
+ lastSyncToken=sync_token,
+ )
+
+ returnValue(results)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def getSyncState(self, txn):
+ """
+ Get local synchronization state for the home being migrated.
+ """
+ records = yield CalendarMigrationRecord.querysimple(
+ txn, calendarHomeResourceID=self.homeId
+ )
+ returnValue(dict([(record.remoteResourceID, record) for record in records]))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def updateSyncState(self, txn, stateRecord, newSyncToken):
+ """
+ Update or insert an L{CalendarMigrationRecord} with the new specified sync token.
+ """
+ if stateRecord.isnew():
+ stateRecord.lastSyncToken = newSyncToken
+ yield stateRecord.insert(txn)
+ else:
+ # The existing stateRecord has a stale txn, but valid column values. We have
+ # to duplicate it before we can give it a different txn.
+ stateRecord = stateRecord.duplicate()
+ stateRecord.transaction = txn
+ yield stateRecord.update(lastSyncToken=newSyncToken)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def purgeLocal(self, txn, local_sync_state, remote_sync_state):
+ """
+ Remove (silently - i.e., no scheduling) local calendars that are no longer on the remote side.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param local_sync_state: local sync state
+ @type local_sync_state: L{dict}
+ @param remote_sync_state: remote sync state
+ @type remote_sync_state: L{dict}
+ """
+ home = yield self._localHome(txn)
+ for localID in set(local_sync_state.keys()) - set(remote_sync_state.keys()):
+ calendar = yield home.childWithID(local_sync_state[localID].localResourceID)
+ if calendar is not None:
+ yield calendar.purge()
+ del local_sync_state[localID]
+ self.accounting(" Purged calendar local-id={} that no longer exists on the remote pod.".format(localID))
+
+
+ @inlineCallbacks
+ def syncCalendar(self, remoteID, local_sync_state, remote_sync_state):
+ """
+ Sync the contents of a calendar from the remote side. The local calendar may need to be created
+ on initial sync. Make use of sync tokens to avoid unnecessary work.
+
+ @param remoteID: id of the remote calendar to sync
+ @type remoteID: L{int}
+ @param local_sync_state: local sync state
+ @type local_sync_state: L{dict}
+ @param remote_sync_state: remote sync state
+ @type remote_sync_state: L{dict}
+ """
+
+ self.accounting("Starting: syncCalendar.")
+
+ # See if we need to create the local one first
+ if remoteID not in local_sync_state:
+ localID = yield self.newCalendar()
+ local_sync_state[remoteID] = CalendarMigrationRecord.make(
+ calendarHomeResourceID=self.homeId,
+ remoteResourceID=remoteID,
+ localResourceID=localID,
+ lastSyncToken=None,
+ )
+ self.accounting(" Created new calendar local-id={}, remote-id={}.".format(localID, remoteID))
+ else:
+ localID = local_sync_state.get(remoteID).localResourceID
+ self.accounting(" Updating calendar local-id={}, remote-id={}.".format(localID, remoteID))
+ local_record = local_sync_state.get(remoteID)
+
+ remote_token = remote_sync_state[remoteID].lastSyncToken
+ if local_record.lastSyncToken != remote_token:
+ # Sync meta-data such as name, alarms, supported-components, transp, etc
+ yield self.syncCalendarMetaData(local_record)
+
+ # Sync object resources
+ changed, removed = yield self.findObjectsToSync(local_record)
+ self.accounting(" Calendar objects changed={}, removed={}.".format(len(changed), len(removed)))
+ yield self.purgeDeletedObjectsInBatches(local_record, removed)
+ yield self.updateChangedObjectsInBatches(local_record, changed)
+
+ yield self.updateSyncState(local_record, remote_token)
+ self.accounting("Completed: syncCalendar.")
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def newCalendar(self, txn):
+ """
+ Create a new local calendar to sync remote data to. We don't care about the name
+ of the calendar right now - it will be sync'd later.
+ """
+
+ home = yield self._localHome(txn)
+ calendar = yield home.createChildWithName(str(uuid4()))
+ returnValue(calendar.id())
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def syncCalendarMetaData(self, txn, migrationRecord):
+ """
+ Sync the metadata of a calendar from the remote side.
+
+ @param migrationRecord: current migration record
+ @type migrationRecord: L{CalendarMigrationRecord}
+ """
+
+ # Remote changes
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
+ if remote_calendar is None:
+ returnValue(None)
+
+ # Get the local calendar and copy the meta-data across
+ local_home = yield self._localHome(txn)
+ local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
+ yield local_calendar.copyMetadata(remote_calendar)
+ self.accounting(" Copied calendar meta-data for calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID}.".format(migrationRecord))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def findObjectsToSync(self, txn, migrationRecord):
+ """
+ Find the set of object resources that need to be sync'd from the remote
+ side and the set that need to be removed locally. Take into account the
+ possibility that this is a partial sync and removals or additions might
+ be false positives.
+
+ @param migrationRecord: current migration record
+ @type migrationRecord: L{CalendarMigrationRecord}
+ """
+
+ # Remote changes
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(migrationRecord.remoteResourceID)
+ if remote_calendar is None:
+ returnValue(None)
+ changed, deleted, _ignore_invalid = yield remote_calendar.resourceNamesSinceToken(migrationRecord.lastSyncToken)
+
+ # Get the local calendar so the md5 values can be compared
+ local_home = yield self._localHome(txn)
+ local_calendar = yield local_home.childWithID(migrationRecord.localResourceID)
+
+ # Check the md5's on each changed remote with the local one to filter out ones
+ # we don't actually need to sync
+ remote_changes = yield remote_calendar.objectResourcesWithNames(changed)
+ remote_changes = dict([(calendar.name(), calendar) for calendar in remote_changes])
+
+ local_changes = yield local_calendar.objectResourcesWithNames(changed)
+ local_changes = dict([(calendar.name(), calendar) for calendar in local_changes])
+
+ actual_changes = []
+ for name, calendar in remote_changes.items():
+ if name not in local_changes or remote_changes[name].md5() != local_changes[name].md5():
+ actual_changes.append(name)
+
+ returnValue((actual_changes, deleted,))
+
+
+ @inlineCallbacks
+ def purgeDeletedObjectsInBatches(self, migrationRecord, deleted):
+ """
+ Purge (silently remove) the specified object resources. This needs to
+ succeed in the case where some or all resources have already been deleted.
+ Do this in batches to keep transaction times small.
+
+ @param migrationRecord: local calendar migration record
+ @type migrationRecord: L{CalendarMigrationRecord}
+ @param deleted: list of names to purge
+ @type deleted: L{list} of L{str}
+ """
+
+ remaining = list(deleted)
+ while remaining:
+ yield self.purgeBatch(migrationRecord.localResourceID, remaining[:self.BATCH_SIZE])
+ del remaining[:self.BATCH_SIZE]
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def purgeBatch(self, txn, localID, purge_names):
+ """
+ Purge a bunch of object resources from the specified calendar.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param localID: id of the local calendar to sync
+ @type localID: L{int}
+ @param purge_names: object resource names to purge
+ @type purge_names: L{list} of L{str}
+ """
+
+ # Load the local objects that are to be purged
+ local_home = yield self._localHome(txn)
+ local_calendar = yield local_home.childWithID(localID)
+ local_objects = yield local_calendar.objectResourcesWithNames(purge_names)
+
+ for local_object in local_objects:
+ yield local_object.purge()
+ self.accounting(" Purged calendar object local-id={}.".format(local_object.id()))
+
+
+ @inlineCallbacks
+ def updateChangedObjectsInBatches(self, migrationRecord, changed):
+ """
+ Update the specified object resources. This needs to succeed in the
+ case where some or all resources have already been deleted.
+ Do this in batches to keep transaction times small.
+
+ @param migrationRecord: local calendar migration record
+ @type migrationRecord: L{CalendarMigrationRecord}
+ @param changed: list of names to update
+ @type changed: L{list} of L{str}
+ """
+
+ remaining = list(changed)
+ while remaining:
+ yield self.updateBatch(
+ migrationRecord.localResourceID,
+ migrationRecord.remoteResourceID,
+ remaining[:self.BATCH_SIZE],
+ )
+ del remaining[:self.BATCH_SIZE]
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def updateBatch(self, txn, localID, remoteID, remaining):
+ """
+ Update a bunch of object resources from the specified remote calendar.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param localID: id of the local calendar to sync
+ @type localID: L{int}
+ @param remoteID: id of the remote calendar to sync with
+ @type remoteID: L{int}
+ @param remaining: object resource names to update
+ @type remaining: L{list} of L{str}
+ """
+
+ # Get remote objects
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(remoteID)
+ if remote_calendar is None:
+ returnValue(None)
+ remote_objects = yield remote_calendar.objectResourcesWithNames(remaining)
+ remote_objects = dict([(obj.name(), obj) for obj in remote_objects])
+
+ # Get local objects
+ local_home = yield self._localHome(txn)
+ local_calendar = yield local_home.childWithID(localID)
+ local_objects = yield local_calendar.objectResourcesWithNames(remaining)
+ local_objects = dict([(obj.name(), obj) for obj in local_objects])
+
+ # Sync ones that still exist - use txn._migrating together with stuffing the remote md5
+ # value onto the component being stored to ensure that the md5 value stored locally
+ # matches the remote one (which should help reduce the need for a client to resync
+ # the data when moved from one pod to the other).
+ txn._migrating = True
+ for obj_name in remote_objects.keys():
+ remote_object = remote_objects[obj_name]
+ remote_data = yield remote_object.component()
+ remote_data.md5 = remote_object.md5()
+ if obj_name in local_objects:
+ local_object = yield local_objects[obj_name]
+ yield local_object._setComponentInternal(remote_data, internal_state=ComponentUpdateState.RAW)
+ del local_objects[obj_name]
+ log_op = "Updated"
+ else:
+ local_object = yield local_calendar._createCalendarObjectWithNameInternal(obj_name, remote_data, internal_state=ComponentUpdateState.RAW)
+
+ # Maintain the mapping from the remote to local id. Note that this mapping never changes as the ids on both
+ # sides are immutable - though it may get deleted if the local object is removed during sync (via a cascade).
+ yield CalendarObjectMigrationRecord.create(
+ txn,
+ calendarHomeResourceID=self.homeId,
+ remoteResourceID=remote_object.id(),
+ localResourceID=local_object.id()
+ )
+ log_op = "Created"
+
+ # Sync meta-data such as schedule object, schedule tags, access mode etc
+ yield local_object.copyMetadata(remote_object)
+ self.accounting(" {} calendar object local-id={}, remote-id={}.".format(log_op, local_object.id(), remote_object.id()))
+
+ # Purge the ones that remain
+ for local_object in local_objects.values():
+ yield local_object.purge()
+ self.accounting(" Purged calendar object local-id={}.".format(local_object.id()))
+
+
+ @inlineCallbacks
+ def syncAttachments(self):
+ """
+ Sync attachments (both metadata and actual attachment data) for the home being migrated.
+ """
+
+ self.accounting("Starting: syncAttachments...")
+
+ # Two steps - sync the table first in one txn, then sync each attachment's data
+ changed_ids, removed_ids = yield self.syncAttachmentTable()
+ self.accounting(" Attachments changed={}, removed={}".format(len(changed_ids), len(removed_ids)))
+
+ for local_id in changed_ids:
+ yield self.syncAttachmentData(local_id)
+
+ self.accounting("Completed: syncAttachments.")
+
+ returnValue((changed_ids, removed_ids,))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def syncAttachmentTable(self, txn):
+ """
+ Sync the ATTACHMENT table data for the home being migrated. Return the list of local attachment ids that
+ now need their attachment data sync'd from the server.
+ """
+
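+ # Worked example (illustrative ids): if the remote home has attachments {1, 2, 3} and
+ # migration records already map remote ids {2, 3, 4}, then removed={4} (delete the local
+ # copy and its record), added={1} (create a local placeholder plus a record), and
+ # updates={2, 3} (re-fetch data only where the md5 differs).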
+ remote_home = yield self._remoteHome(txn)
+ rattachments = yield remote_home.getAllAttachments()
+ rmap = dict([(attachment.id(), attachment) for attachment in rattachments])
+
+ local_home = yield self._localHome(txn)
+ lattachments = yield local_home.getAllAttachments()
+ lmap = dict([(attachment.id(), attachment) for attachment in lattachments])
+
+ # Figure out the differences
+ records = yield AttachmentMigrationRecord.querysimple(
+ txn, calendarHomeResourceID=self.homeId
+ )
+ mapping = dict([(record.remoteResourceID, record) for record in records])
+
+ # Removed - remove attachment and migration state
+ removed = set(mapping.keys()) - set(rmap.keys())
+ for remove_id in removed:
+ record = mapping[remove_id]
+ att = yield ManagedAttachment.load(txn, None, None, attachmentID=record.localResourceID)
+ if att:
+ yield att.remove(adjustQuota=False)
+ else:
+ yield record.delete()
+
+ # Track which ones need attachment data sync'd over
+ data_ids = set()
+
+ # Added - add new attachment and migration state
+ added = set(rmap.keys()) - set(mapping.keys())
+ for added_id in added:
+ attachment = yield ManagedAttachment._create(txn, None, self.homeId)
+ yield AttachmentMigrationRecord.create(
+ txn,
+ calendarHomeResourceID=self.homeId,
+ remoteResourceID=added_id,
+ localResourceID=attachment.id(),
+ )
+ data_ids.add(attachment.id())
+
+ # Possible updates - check for md5 change and sync
+ updates = set(mapping.keys()) & set(rmap.keys())
+ for updated_id in updates:
+ local_id = mapping[updated_id].localResourceID
+ if rmap[updated_id].md5() != lmap[local_id].md5():
+ yield lmap[local_id].copyRemote(rmap[updated_id])
+ data_ids.add(local_id)
+
+ returnValue((data_ids, removed,))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def syncAttachmentData(self, txn, local_id):
+ """
+ Sync the attachment data for the home being migrated.
+ """
+
+ remote_home = yield self._remoteHome(txn)
+ local_home = yield self._localHome(txn)
+ attachment = yield local_home.getAttachmentByID(local_id)
+ if attachment is None:
+ returnValue(None)
+
+ records = yield AttachmentMigrationRecord.querysimple(
+ txn, calendarHomeResourceID=self.homeId, localResourceID=local_id
+ )
+ if records:
+ # Read the data from the conduit
+ yield remote_home.readAttachmentData(records[0].remoteResourceID, attachment)
+ self.accounting(" Read attachment local-id={0.localResourceID}, remote-id={0.remoteResourceID}".format(records[0]))
+
+
+ @inlineCallbacks
+ def linkAttachments(self):
+ """
+ Link attachments to the calendar objects they belong to.
+ """
+
+ self.accounting("Starting: linkAttachments...")
+
+ # Get the map of links for the remote home
+ links = yield self.getAttachmentLinks()
+ self.accounting(" Linking {} attachments".format(len(links)))
+
+ # Get remote->local ID mappings
+ attachmentIDMap, objectIDMap = yield self.getAttachmentMappings()
+
+ # Batch setting links for the local home
+ len_links = len(links)
+ while links:
+ yield self.makeAttachmentLinks(links[:50], attachmentIDMap, objectIDMap)
+ links = links[50:]
+
+ self.accounting("Completed: linkAttachments.")
+
+ returnValue(len_links)
+
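+ # The slice-by-50 loop above is the chunking idiom used throughout this class so that each
+ # batch runs in its own small transaction. A stand-alone sketch of the pattern (illustrative
+ # only; &quot;batched&quot; is a hypothetical helper, not part of this module):
+ #
+ #     def batched(items, size=50):
+ #         while items:
+ #             yield items[:size]
+ #             items = items[size:]
+ #
+ #     for chunk in batched(links):
+ #         yield self.makeAttachmentLinks(chunk, attachmentIDMap, objectIDMap)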
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def getAttachmentLinks(self, txn):
+ """
+ Get the remote link information.
+ """
+
+ # Get the map of links for the remote home
+ remote_home = yield self._remoteHome(txn)
+ links = yield remote_home.getAttachmentLinks()
+ returnValue(links)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def getAttachmentMappings(self, txn):
+ """
+ Get the remote-to-local ID mappings for attachments and calendar objects.
+ """
+
+ # Get migration mappings
+ records = yield AttachmentMigrationRecord.querysimple(
+ txn, calendarHomeResourceID=self.homeId
+ )
+ attachmentIDMap = dict([(record.remoteResourceID, record) for record in records])
+
+ records = yield CalendarObjectMigrationRecord.querysimple(
+ txn, calendarHomeResourceID=self.homeId
+ )
+ objectIDMap = dict([(record.remoteResourceID, record) for record in records])
+
+ returnValue((attachmentIDMap, objectIDMap,))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def makeAttachmentLinks(self, txn, links, attachmentIDMap, objectIDMap):
+ """
+ Map remote links to local links.
+ """
+
+ for link in links:
+ # Remote link has an invalid txn at this point so replace that first
+ link._txn = txn
+
+ # Now re-map the attachment ID and calendar_object_id to the local ones
+ link._attachmentID = attachmentIDMap[link._attachmentID].localResourceID
+ link._calendarObjectID = objectIDMap[link._calendarObjectID].localResourceID
+
+ yield link.insert()
+
+
+ @inlineCallbacks
+ def delegateReconcile(self):
+ """
+ Sync the delegate assignments from the remote home to the local home. We won't use
+ a fake directory UID locally.
+ """
+
+ self.accounting("Starting: delegateReconcile...")
+
+ yield self.individualDelegateReconcile()
+ yield self.groupDelegateReconcile()
+ yield self.externalDelegateReconcile()
+
+ self.accounting("Completed: delegateReconcile.")
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def individualDelegateReconcile(self, txn):
+ """
+ Sync the individual delegate assignments from the remote home to the local home. We won't use
+ a fake directory UID locally.
+ """
+ remote_records = yield txn.dumpIndividualDelegatesExternal(self.record)
+ for record in remote_records:
+ yield record.insert(txn)
+
+ self.accounting(" Found {} individual delegates".format(len(remote_records)))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def groupDelegateReconcile(self, txn):
+ """
+ Sync the group delegate assignments from the remote home to the local home. We won't use
+ a fake directory UID locally.
+ """
+ remote_records = yield txn.dumpGroupDelegatesExternal(self.record)
+ for delegator, group in remote_records:
+ # We need to make sure the group exists locally first and map the groupID to the local one
+ local_group = yield txn.groupByUID(group.groupUID)
+ delegator.groupID = local_group.groupID
+ yield delegator.insert(txn)
+
+ self.accounting(" Found {} group delegates".format(len(remote_records)))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def externalDelegateReconcile(self, txn):
+ """
+ Sync the external delegate assignments from the remote home to the local home. We won't use
+ a fake directory UID locally.
+ """
+ remote_records = yield txn.dumpExternalDelegatesExternal(self.record)
+ for record in remote_records:
+ yield record.insert(txn)
+
+ self.accounting(" Found {} external delegates".format(len(remote_records)))
+
+
+ @inlineCallbacks
+ def groupAttendeeReconcile(self):
+ """
+ Sync the remote group attendee links to the local store.
+ """
+
+ self.accounting("Starting: groupAttendeeReconcile...")
+
+ # Get remote data and local mapping information
+ remote_group_attendees, objectIDMap = yield self.groupAttendeeData()
+ self.accounting(" Found {} group attendees".format(len(remote_group_attendees)))
+
+ # Map each result to a local resource (in batches)
+ number_of_links = len(remote_group_attendees)
+ while remote_group_attendees:
+ yield self.groupAttendeeProcess(remote_group_attendees[:50], objectIDMap)
+ remote_group_attendees = remote_group_attendees[50:]
+
+ self.accounting("Completed: groupAttendeeReconcile.")
+
+ returnValue(number_of_links)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def groupAttendeeData(self, txn):
+ """
+ Get the remote group attendee records plus the remote-to-local calendar object ID map.
+ """
+ remote_home = yield self._remoteHome(txn)
+ remote_group_attendees = yield remote_home.getAllGroupAttendees()
+
+ # Get all remote->local object maps
+ records = yield CalendarObjectMigrationRecord.querysimple(
+ txn, calendarHomeResourceID=self.homeId
+ )
+ objectIDMap = dict([(record.remoteResourceID, record.localResourceID) for record in records])
+
+ returnValue((remote_group_attendees, objectIDMap,))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def groupAttendeeProcess(self, txn, results, objectIDMap):
+ """
+ Insert a batch of remote group attendee links into the local store, mapping group and object ids to their local values.
+ """
+ # Map each result to a local resource
+ for groupAttendee, group in results:
+ local_group = yield txn.groupByUID(group.groupUID)
+ groupAttendee.groupID = local_group.groupID
+ try:
+ groupAttendee.resourceID = objectIDMap[groupAttendee.resourceID]
+ except KeyError:
+ continue
+ yield groupAttendee.insert(txn)
+
+
+ @inlineCallbacks
+ def notificationsReconcile(self):
+ """
+ Sync all the existing L{NotificationObject} resources from the remote store.
+ """
+
+ self.accounting("Starting: notificationsReconcile...")
+ records = yield self.notificationRecords()
+ self.accounting(" Found {} notifications".format(len(records)))
+
+ # Batch setting resources for the local home
+ len_records = len(records)
+ while records:
+ yield self.makeNotifications(records[:50])
+ records = records[50:]
+
+ self.accounting("Completed: notificationsReconcile.")
+
+ returnValue(len_records)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def notificationRecords(self, txn):
+ """
+ Get all the existing L{NotificationObjectRecord}s from the remote store.
+ """
+
+ notifications = yield self._remoteNotificationsHome(txn)
+ records = yield notifications.notificationObjectRecords()
+ for record in records:
+ # This needs to be reset when added to the local store
+ del record.resourceID
+
+ # Map the remote id to the local one.
+ record.notificationHomeResourceID = notifications.id()
+
+ returnValue(records)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def makeNotifications(self, txn, records):
+ """
+ Create L{NotificationObjectRecord} records in the local store.
+ """
+
+ notifications = yield NotificationCollection.notificationsWithUID(txn, self.diruid, status=_HOME_STATUS_MIGRATING, create=True)
+ for record in records:
+ # Do this via the "write" API so that sync revisions are updated properly, rather than just
+ # inserting the records directly.
+ notification = yield notifications.writeNotificationObject(record.notificationUID, record.notificationType, record.notificationData)
+ self.accounting(" Added notification local-id={}.".format(notification.id()))
+
+
+ @inlineCallbacks
+ def sharedByCollectionsReconcile(self):
+ """
+ Sync all the collections shared by the migrating user from the remote store. We will do this one calendar at a time since
+ there could be a large number of sharees per calendar.
+
+ Here is the logic we need: first assume we have three pods: A, B, C, and we are migrating a user from A->B. We start
+ with a set of shares (X -> Y - where X is the sharer and Y the sharee) on pod A. We migrate the sharer to pod B. We
+ then need to have a set of bind records on pod B, and adjust the set on pod A. Note that no changes are required on pod C.
+
+ Original | Changes | Changes
+ Shares | on B | on A
+ --------------|------------------------------|---------------------
+ A -> A | B -> A (new) | B -> A (modify existing)
+ A -> B | B -> B (modify existing) | (removed)
+ A -> C | B -> C (new) | (removed)
+ """
+
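+ # Concrete illustration (user names are only examples): user01 on pod A shares a
+ # calendar with user02 (also pod A), puser01 (pod B) and a hypothetical pod-C user.
+ # After user01 is migrated to pod B, pod B ends up with bind records for all three
+ # sharees pointing at the new local calendar, pod A keeps just an adjusted record for
+ # user02 that now references the calendar on pod B, and pod C needs no changes.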
+ self.accounting("Starting: sharedByCollectionsReconcile...")
+ calendars = yield self.getSyncState()
+
+ len_records = 0
+ for calendar in calendars.values():
+ records, bindUID = yield self.sharedByCollectionRecords(calendar.remoteResourceID, calendar.localResourceID)
+ if not records:
+ continue
+ records = records.items()
+
+ self.accounting(" Found shared by calendar local-id={0.localResourceID}, remote-id={0.remoteResourceID} with {1} sharees".format(
+ calendar, len(records),
+ ))
+
+ # Batch setting resources for the local home
+ len_records += len(records)
+ while records:
+ yield self.makeSharedByCollections(records[:50], calendar.localResourceID)
+ records = records[50:]
+
+ # Get groups from remote pod
+ yield self.syncGroupSharees(calendar.remoteResourceID, calendar.localResourceID)
+
+ # Update the remote pod to switch over the shares
+ yield self.updatedRemoteSharedByCollections(calendar.remoteResourceID, bindUID)
+
+ self.accounting("Completed: sharedByCollectionsReconcile.")
+
+ returnValue(len_records)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def sharedByCollectionRecords(self, txn, remote_id, local_id):
+ """
+ Get all the existing L{CalendarBindRecord}s from the remote store. Also make sure a
+ bindUID exists for the local calendar.
+ """
+
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(remote_id)
+ records = yield remote_calendar.sharingBindRecords()
+
+ # Check bindUID
+ local_records = yield CalendarBindRecord.querysimple(
+ txn,
+ calendarHomeResourceID=self.homeId,
+ calendarResourceID=local_id,
+ )
+ if records and not local_records[0].bindUID:
+ yield local_records[0].update(bindUID=str(uuid4()))
+
+ returnValue((records, local_records[0].bindUID,))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def makeSharedByCollections(self, txn, records, calendar_id):
+ """
+ Create L{CalendarBindRecord} records in the local store.
+ """
+
+ for shareeUID, record in records:
+ shareeHome = yield txn.calendarHomeWithUID(shareeUID, create=True)
+
+ # First look for an existing record that could be present if the migrating user had
+ # previously shared with this sharee as a cross-pod share
+ oldrecord = yield CalendarBindRecord.querysimple(
+ txn,
+ calendarHomeResourceID=shareeHome.id(),
+ calendarResourceName=record.calendarResourceName,
+ )
+
+ # FIXME: need to figure out sync-token and bind revision changes
+
+ if oldrecord:
+ # Point old record to the new local calendar being shared
+ yield oldrecord[0].update(
+ calendarResourceID=calendar_id,
+ bindRevision=0,
+ )
+ self.accounting(" Updating existing sharee {}".format(shareeHome.uid()))
+ else:
+ # Map the record resource ids and insert a new record
+ record.calendarHomeResourceID = shareeHome.id()
+ record.calendarResourceID = calendar_id
+ record.bindRevision = 0
+ yield record.insert(txn)
+ self.accounting(" Adding new sharee {}".format(shareeHome.uid()))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def syncGroupSharees(self, txn, remote_id, local_id):
+ """
+ Sync the group sharees for a remote share.
+ """
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(remote_id)
+ results = yield remote_calendar.groupSharees()
+ groups = dict([(group.groupID, group.groupUID,) for group in results["groups"]])
+ for share in results["sharees"]:
+ local_group = yield txn.groupByUID(groups[share.groupID])
+ share.groupID = local_group.groupID
+ share.calendarID = local_id
+ yield share.insert(txn)
+ self.accounting(" Adding group sharee {}".format(local_group.groupUID))
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def updatedRemoteSharedByCollections(self, txn, remote_id, bindUID):
+ """
+ Have the remote store update its L{CalendarBindRecord}s for this calendar now that the sharer has been migrated to this pod.
+ """
+
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(remote_id)
+ records = yield remote_calendar.migrateBindRecords(bindUID)
+ self.accounting(" Updating remote records")
+ returnValue(records)
+
+
+ @inlineCallbacks
+ def sharedToCollectionsReconcile(self):
+ """
+ Sync all the collections shared to the migrating user from the remote store.
+
+ Here is the logic we need: first assume we have three pods: A, B, C, and we are migrating a user from A->B. We start
+ with a set of shares (X -> Y - where X is the sharer and Y the sharee) whose sharee is on pod A. We migrate the sharee to pod B. We
+ then need to have a set of bind records on pod B, and adjust the set on pod A. Note that no changes are required on pod C.
+
+ Original | Changes | Changes
+ Shares | on B | on A
+ --------------|------------------------------|---------------------
+ A -> A | A -> B (new) | A -> B (modify existing)
+ B -> A | B -> B (modify existing) | (removed)
+ C -> A | C -> B (new) | (removed)
+ """
+
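+ # Concrete illustration (user names are only examples): user01, the sharee being
+ # migrated from pod A to pod B, accepts shares from user02 (pod A), puser01 (pod B)
+ # and a hypothetical pod-C sharer. After migration, pod B holds bind records giving
+ # user01 access to all three shared calendars; on pod A only user02's share record
+ # survives, adjusted to point at user01's new home on pod B, and pod C is untouched.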
+ self.accounting("Starting: sharedToCollectionsReconcile...")
+
+ records = yield self.sharedToCollectionRecords()
+ records = records.items()
+ len_records = len(records)
+ self.accounting(" Found {} shared to collections".format(len_records))
+
+ while records:
+ yield self.makeSharedToCollections(records[:50])
+ records = records[50:]
+
+ self.accounting("Completed: sharedToCollectionsReconcile.")
+
+ returnValue(len_records)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def sharedToCollectionRecords(self, txn):
+ """
+ Get the names and sharer UIDs for remote shared calendars.
+ """
+
+ # List of calendars from the remote side
+ home = yield self._remoteHome(txn)
+ if home is None:
+ returnValue(None)
+ results = yield home.sharedToBindRecords()
+ returnValue(results)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def makeSharedToCollections(self, txn, records):
+ """
+ Create L{CalendarBindRecord} records in the local store.
+ """
+
+ for sharerUID, (shareeRecord, ownerRecord, metadataRecord) in records:
+ sharerHome = yield txn.calendarHomeWithUID(sharerUID, create=True)
+
+ # We need to figure out the right thing to do based on whether the sharer is local to this pod
+ # (the one where the migrated user will be hosted) or located on another pod
+
+ if sharerHome.normal():
+ # First look for an existing record that must be present if the migrating user had
+ # previously been shared with by this sharer
+ oldrecord = yield CalendarBindRecord.querysimple(
+ txn,
+ calendarResourceName=shareeRecord.calendarResourceName,
+ )
+ if len(oldrecord) == 1:
+ # Point old record to the new local calendar home
+ yield oldrecord[0].update(
+ calendarHomeResourceID=self.homeId,
+ )
+ self.accounting(" Updated existing local sharer record {}".format(sharerHome.uid()))
+ else:
+ raise AssertionError("An existing share must be present")
+ else:
+ # We have an external user. That sharer may have already shared the calendar with some other user
+ # on this pod, in which case there is already a CALENDAR table entry for it, and we need the
+ # resource ID from that to use in the new CALENDAR_BIND record we create. If a pre-existing share
+ # is not present, then we have to create the CALENDAR table entry and associated pieces
+
+ remote_id = shareeRecord.calendarResourceID
+
+ # Look for pre-existing share with the same external ID
+ oldrecord = yield CalendarBindRecord.querysimple(
+ txn,
+ calendarHomeResourceID=sharerHome.id(),
+ bindUID=ownerRecord.bindUID,
+ )
+ if oldrecord:
+ # Map the record resource ids and insert a new record
+ calendar_id = oldrecord[0].calendarResourceID
+ log_op = "Updated"
+ else:
+ sharerView = yield sharerHome.createCollectionForExternalShare(
+ ownerRecord.calendarResourceName,
+ ownerRecord.bindUID,
+ metadataRecord.supportedComponents,
+ )
+ calendar_id = sharerView.id()
+ log_op = "Created"
+
+ shareeRecord.calendarHomeResourceID = self.homeId
+ shareeRecord.calendarResourceID = calendar_id
+ shareeRecord.bindRevision = 0
+ yield shareeRecord.insert(txn)
+ self.accounting(" {} remote sharer record {}".format(log_op, sharerHome.uid()))
+
+ yield self.updatedRemoteSharedToCollection(remote_id, txn=txn)
+
+
+ @inTransactionWrapper
+ @inlineCallbacks
+ def updatedRemoteSharedToCollection(self, txn, remote_id):
+ """
+ Have the remote store update its L{CalendarBindRecord}s for this calendar now that the sharee has been migrated to this pod.
+ """
+
+ remote_home = yield self._remoteHome(txn)
+ remote_calendar = yield remote_home.childWithID(remote_id)
+ records = yield remote_calendar.migrateBindRecords(None)
+ self.accounting(" Updating remote records")
+ returnValue(records)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/sync_metadata.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,58 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from twext.enterprise.dal.record import Record, fromTable
-from txdav.common.datastore.sql_tables import schema
-from twext.enterprise.dal.syntax import Parameter, Delete
-from twisted.internet.defer import inlineCallbacks
-
-"""
-Module that manages store-level metadata objects used during the migration process.
-"""
-
-class CalendarMigrationRecord(Record, fromTable(schema.CALENDAR_MIGRATION)):
- """
- @DynamicAttrs
- L{Record} for L{schema.CALENDAR_MIGRATION}.
- """
-
- @classmethod
- @inlineCallbacks
- def deleteremotes(cls, txn, homeid, remotes):
- return Delete(
- From=cls.table,
- Where=(cls.calendarHomeResourceID == homeid).And(
- cls.remoteResourceID.In(Parameter("remotes", len(remotes)))
- ),
- ).on(txn, remotes=remotes)
-
-
-
-class CalendarObjectMigrationRecord(Record, fromTable(schema.CALENDAR_OBJECT_MIGRATION)):
- """
- @DynamicAttrs
- L{Record} for L{schema.CALENDAR_OBJECT_MIGRATION}.
- """
- pass
-
-
-
-class AttachmentMigrationRecord(Record, fromTable(schema.ATTACHMENT_MIGRATION)):
- """
- @DynamicAttrs
- L{Record} for L{schema.ATTACHMENT_MIGRATION}.
- """
- pass
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationsync_metadatapyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationsync_metadatapy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/sync_metadata.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,58 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import Record, fromTable
+from txdav.common.datastore.sql_tables import schema
+from twext.enterprise.dal.syntax import Parameter, Delete
+from twisted.internet.defer import inlineCallbacks
+
+"""
+Module that manages store-level metadata objects used during the migration process.
+"""
+
+class CalendarMigrationRecord(Record, fromTable(schema.CALENDAR_MIGRATION)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.CALENDAR_MIGRATION}.
+ """
+
+ @classmethod
+ @inlineCallbacks
+ def deleteremotes(cls, txn, homeid, remotes):
+ return Delete(
+ From=cls.table,
+ Where=(cls.calendarHomeResourceID == homeid).And(
+ cls.remoteResourceID.In(Parameter("remotes", len(remotes)))
+ ),
+ ).on(txn, remotes=remotes)
+
+
+
+class CalendarObjectMigrationRecord(Record, fromTable(schema.CALENDAR_OBJECT_MIGRATION)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.CALENDAR_OBJECT_MIGRATION}.
+ """
+ pass
+
+
+
+class AttachmentMigrationRecord(Record, fromTable(schema.ATTACHMENT_MIGRATION)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.ATTACHMENT_MIGRATION}.
+ """
+ pass
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__py"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/__init__.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,15 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtest__init__pyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtest__init__py"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/__init__.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/__init__.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,15 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/augments.xml        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,142 +0,0 @@
</span><del>-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
-Copyright (c) 2009-2015 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -->
-
-<!DOCTYPE augments SYSTEM "../../../conf/auth/augments.dtd">
-
-<augments>
-        <record>
-         <uid>user01</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user02</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user03</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user04</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user05</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user06</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user07</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user08</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user09</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>user10</uid>
-         <server-id>A</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser01</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser02</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser03</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser04</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser05</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser06</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser07</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser08</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser09</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-        <record>
-         <uid>puser10</uid>
-         <server-id>B</server-id>
-         <enable-calendar>true</enable-calendar>
-         <enable-addressbook>true</enable-addressbook>
-        </record>
-</augments>
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsaugmentsxmlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtestaccountsaugmentsxml"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/augments.xml) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/augments.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,142 @@
</span><ins>+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2009-2015 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE augments SYSTEM "../../../conf/auth/augments.dtd">
+
+<augments>
+        <record>
+         <uid>user01</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user02</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user03</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user04</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user05</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user06</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user07</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user08</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user09</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>user10</uid>
+         <server-id>A</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser01</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser02</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser03</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser04</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser05</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser06</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser07</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser08</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser09</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+        <record>
+         <uid>puser10</uid>
+         <server-id>B</server-id>
+         <enable-calendar>true</enable-calendar>
+         <enable-addressbook>true</enable-addressbook>
+        </record>
+</augments>
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,211 +0,0 @@
</span><del>-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
-Copyright (c) 2006-2015 Apple Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
- -->
-
-<!DOCTYPE accounts SYSTEM "accounts.dtd">
-
-<directory realm="Test Realm">
-        <record type="user">
-         <short-name>user01</short-name>
-         <uid>user01</uid>
-         <guid>10000000-0000-0000-0000-000000000001</guid>
-         <password>user01</password>
-         <full-name>User 01</full-name>
-         <email>user01@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user02</short-name>
-         <uid>user02</uid>
-         <guid>10000000-0000-0000-0000-000000000002</guid>
-         <password>user02</password>
-         <full-name>User 02</full-name>
-         <email>user02@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user03</short-name>
-         <uid>user03</uid>
-         <guid>10000000-0000-0000-0000-000000000003</guid>
-         <password>user03</password>
-         <full-name>User 03</full-name>
-         <email>user03@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user04</short-name>
-         <uid>user04</uid>
-         <guid>10000000-0000-0000-0000-000000000004</guid>
-         <password>user04</password>
-         <full-name>User 04</full-name>
-         <email>user04@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user05</short-name>
-         <uid>user05</uid>
-         <guid>10000000-0000-0000-0000-000000000005</guid>
-         <password>user05</password>
-         <full-name>User 05</full-name>
-         <email>user05@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user06</short-name>
-         <uid>user06</uid>
-         <guid>10000000-0000-0000-0000-000000000006</guid>
-         <password>user06</password>
-         <full-name>User 06</full-name>
-         <email>user06@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user07</short-name>
-         <uid>user07</uid>
-         <guid>10000000-0000-0000-0000-000000000007</guid>
-         <password>user07</password>
-         <full-name>User 07</full-name>
-         <email>user07@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user08</short-name>
-         <uid>user08</uid>
-         <guid>10000000-0000-0000-0000-000000000008</guid>
-         <password>user08</password>
-         <full-name>User 08</full-name>
-         <email>user08@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user09</short-name>
-         <uid>user09</uid>
-         <guid>10000000-0000-0000-0000-000000000009</guid>
-         <password>user09</password>
-         <full-name>User 09</full-name>
-         <email>user09@example.com</email>
-        </record>
-        <record type="user">
-         <short-name>user10</short-name>
-         <uid>user10</uid>
-         <guid>10000000-0000-0000-0000-000000000010</guid>
-         <password>user10</password>
-         <full-name>User 10</full-name>
-         <email>user10@example.com</email>
-        </record>
-        <record type="group">
-         <short-name>group01</short-name>
-         <uid>group01</uid>
-         <guid>20000000-0000-0000-0000-000000000001</guid>
-         <full-name>Group 01</full-name>
-         <email>group01@example.com</email>
-         <member-uid>user01</member-uid>
-         <member-uid>puser01</member-uid>
-        </record>
-        <record type="group">
-         <short-name>group02</short-name>
-         <uid>group02</uid>
-         <guid>20000000-0000-0000-0000-000000000002</guid>
-         <full-name>Group 02</full-name>
-         <email>group02@example.com</email>
-         <member-uid>user06</member-uid>
-         <member-uid>user07</member-uid>
-         <member-uid>user08</member-uid>
-        </record>
-        <record type="group">
-         <short-name>group03</short-name>
-         <uid>group03</uid>
-         <guid>20000000-0000-0000-0000-000000000003</guid>
-         <full-name>Group 03</full-name>
-         <email>group03@example.com</email>
-         <member-uid>user07</member-uid>
-         <member-uid>user08</member-uid>
-         <member-uid>user09</member-uid>
-        </record>
-        <record type="group">
-         <short-name>group04</short-name>
-         <uid>group04</uid>
-         <guid>20000000-0000-0000-0000-000000000004</guid>
-         <full-name>Group 04</full-name>
-         <email>group04@example.com</email>
-         <member-uid>group02</member-uid>
-         <member-uid>group03</member-uid>
-         <member-uid>user10</member-uid>
-        </record>
-        <record type="user">
-         <uid>puser01</uid>
-         <short-name>puser01</short-name>
-         <password>puser01</password>
-         <full-name>Puser 01</full-name>
-         <email>puser01@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser02</uid>
-         <short-name>puser02</short-name>
-         <password>puser02</password>
-         <full-name>Puser 02</full-name>
-         <email>puser02@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser03</uid>
-         <short-name>puser03</short-name>
-         <password>puser03</password>
-         <full-name>Puser 03</full-name>
-         <email>puser03@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser04</uid>
-         <short-name>puser04</short-name>
-         <password>puser04</password>
-         <full-name>Puser 04</full-name>
-         <email>puser04@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser05</uid>
-         <short-name>puser05</short-name>
-         <password>puser05</password>
-         <full-name>Puser 05</full-name>
-         <email>puser05@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser06</uid>
-         <short-name>puser06</short-name>
-         <password>puser06</password>
-         <full-name>Puser 06</full-name>
-         <email>puser06@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser07</uid>
-         <short-name>puser07</short-name>
-         <password>puser07</password>
-         <full-name>Puser 07</full-name>
-         <email>puser07@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser08</uid>
-         <short-name>puser08</short-name>
-         <password>puser08</password>
-         <full-name>Puser 08</full-name>
-         <email>puser08@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser09</uid>
-         <short-name>puser09</short-name>
-         <password>puser09</password>
-         <full-name>Puser 09</full-name>
-         <email>puser09@example.com</email>
-        </record>
-        <record type="user">
-         <uid>puser10</uid>
-         <short-name>puser10</short-name>
-         <password>puser10</password>
-         <full-name>Puser 10</full-name>
-         <email>puser10@example.com</email>
-        </record>
-</directory>
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxmlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtestaccountsgroupAccountsxml"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,211 @@
</span><ins>+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2006-2015 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE accounts SYSTEM "accounts.dtd">
+
+<directory realm="Test Realm">
+        <record type="user">
+         <short-name>user01</short-name>
+         <uid>user01</uid>
+         <guid>10000000-0000-0000-0000-000000000001</guid>
+         <password>user01</password>
+         <full-name>User 01</full-name>
+         <email>user01@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user02</short-name>
+         <uid>user02</uid>
+         <guid>10000000-0000-0000-0000-000000000002</guid>
+         <password>user02</password>
+         <full-name>User 02</full-name>
+         <email>user02@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user03</short-name>
+         <uid>user03</uid>
+         <guid>10000000-0000-0000-0000-000000000003</guid>
+         <password>user03</password>
+         <full-name>User 03</full-name>
+         <email>user03@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user04</short-name>
+         <uid>user04</uid>
+         <guid>10000000-0000-0000-0000-000000000004</guid>
+         <password>user04</password>
+         <full-name>User 04</full-name>
+         <email>user04@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user05</short-name>
+         <uid>user05</uid>
+         <guid>10000000-0000-0000-0000-000000000005</guid>
+         <password>user05</password>
+         <full-name>User 05</full-name>
+         <email>user05@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user06</short-name>
+         <uid>user06</uid>
+         <guid>10000000-0000-0000-0000-000000000006</guid>
+         <password>user06</password>
+         <full-name>User 06</full-name>
+         <email>user06@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user07</short-name>
+         <uid>user07</uid>
+         <guid>10000000-0000-0000-0000-000000000007</guid>
+         <password>user07</password>
+         <full-name>User 07</full-name>
+         <email>user07@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user08</short-name>
+         <uid>user08</uid>
+         <guid>10000000-0000-0000-0000-000000000008</guid>
+         <password>user08</password>
+         <full-name>User 08</full-name>
+         <email>user08@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user09</short-name>
+         <uid>user09</uid>
+         <guid>10000000-0000-0000-0000-000000000009</guid>
+         <password>user09</password>
+         <full-name>User 09</full-name>
+         <email>user09@example.com</email>
+        </record>
+        <record type="user">
+         <short-name>user10</short-name>
+         <uid>user10</uid>
+         <guid>10000000-0000-0000-0000-000000000010</guid>
+         <password>user10</password>
+         <full-name>User 10</full-name>
+         <email>user10@example.com</email>
+        </record>
+        <record type="group">
+         <short-name>group01</short-name>
+         <uid>group01</uid>
+         <guid>20000000-0000-0000-0000-000000000001</guid>
+         <full-name>Group 01</full-name>
+         <email>group01@example.com</email>
+         <member-uid>user01</member-uid>
+         <member-uid>puser01</member-uid>
+        </record>
+        <record type="group">
+         <short-name>group02</short-name>
+         <uid>group02</uid>
+         <guid>20000000-0000-0000-0000-000000000002</guid>
+         <full-name>Group 02</full-name>
+         <email>group02@example.com</email>
+         <member-uid>user06</member-uid>
+         <member-uid>user07</member-uid>
+         <member-uid>user08</member-uid>
+        </record>
+        <record type="group">
+         <short-name>group03</short-name>
+         <uid>group03</uid>
+         <guid>20000000-0000-0000-0000-000000000003</guid>
+         <full-name>Group 03</full-name>
+         <email>group03@example.com</email>
+         <member-uid>user07</member-uid>
+         <member-uid>user08</member-uid>
+         <member-uid>user09</member-uid>
+        </record>
+        <record type="group">
+         <short-name>group04</short-name>
+         <uid>group04</uid>
+         <guid>20000000-0000-0000-0000-000000000004</guid>
+         <full-name>Group 04</full-name>
+         <email>group04@example.com</email>
+         <member-uid>group02</member-uid>
+         <member-uid>group03</member-uid>
+         <member-uid>user10</member-uid>
+        </record>
+        <record type="user">
+         <uid>puser01</uid>
+         <short-name>puser01</short-name>
+         <password>puser01</password>
+         <full-name>Puser 01</full-name>
+         <email>puser01@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser02</uid>
+         <short-name>puser02</short-name>
+         <password>puser02</password>
+         <full-name>Puser 02</full-name>
+         <email>puser02@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser03</uid>
+         <short-name>puser03</short-name>
+         <password>puser03</password>
+         <full-name>Puser 03</full-name>
+         <email>puser03@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser04</uid>
+         <short-name>puser04</short-name>
+         <password>puser04</password>
+         <full-name>Puser 04</full-name>
+         <email>puser04@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser05</uid>
+         <short-name>puser05</short-name>
+         <password>puser05</password>
+         <full-name>Puser 05</full-name>
+         <email>puser05@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser06</uid>
+         <short-name>puser06</short-name>
+         <password>puser06</password>
+         <full-name>Puser 06</full-name>
+         <email>puser06@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser07</uid>
+         <short-name>puser07</short-name>
+         <password>puser07</password>
+         <full-name>Puser 07</full-name>
+         <email>puser07@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser08</uid>
+         <short-name>puser08</short-name>
+         <password>puser08</password>
+         <full-name>Puser 08</full-name>
+         <email>puser08@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser09</uid>
+         <short-name>puser09</short-name>
+         <password>puser09</password>
+         <full-name>Puser 09</full-name>
+         <email>puser09@example.com</email>
+        </record>
+        <record type="user">
+         <uid>puser10</uid>
+         <short-name>puser10</short-name>
+         <password>puser10</password>
+         <full-name>Puser 10</full-name>
+         <email>puser10@example.com</email>
+        </record>
+</directory>
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,1307 +0,0 @@
</span><del>-##
-# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from pycalendar.datetime import DateTime
-from twext.enterprise.dal.syntax import Select
-from twext.enterprise.jobqueue import JobItem
-from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, returnValue
-from twisted.python.filepath import FilePath
-from twistedcaldav.config import config
-from twistedcaldav.ical import Component, normalize_iCalStr
-from txdav.caldav.datastore.sql import ManagedAttachment
-from txdav.caldav.datastore.sql_directory import GroupShareeRecord
-from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
-from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
- AttachmentMigrationRecord
-from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
-from txdav.common.datastore.sql_directory import DelegateRecord, \
- ExternalDelegateGroupsRecord, DelegateGroupsRecord, GroupsRecord
-from txdav.common.datastore.sql_notification import NotificationCollection
-from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_EXTERNAL, \
- _BIND_MODE_READ, _HOME_STATUS_MIGRATING, _HOME_STATUS_NORMAL, \
- _HOME_STATUS_DISABLED
-from txdav.common.datastore.test.util import populateCalendarsFrom
-from txdav.who.delegates import Delegates
-from txweb2.http_headers import MimeType
-from txweb2.stream import MemoryStream
-from uuid import uuid4
-import json
-
-
-class TestCrossPodHomeSync(MultiStoreConduitTest):
- """
- Test that L{CrossPodHomeSync} works.
- """
-
- nowYear = {"now": DateTime.getToday().getYear()}
-
- caldata1 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid1
-DTSTART:{now:04d}0102T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**nowYear)
-
- caldata1_changed = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid1
-DTSTART:{now:04d}0102T150000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance changed
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**nowYear)
-
- caldata2 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid2
-DTSTART:{now:04d}0102T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**nowYear)
-
- caldata3 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid3
-DTSTART:{now:04d}0102T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**nowYear)
-
- caldata4 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid4
-DTSTART:{now:04d}0102T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=DAILY
-SUMMARY:instance
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**nowYear)
-
-
- @inlineCallbacks
- def test_remote_home(self):
- """
- Test that a remote home can be accessed.
- """
-
- home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- self.assertTrue(home01 is not None)
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- home = yield syncer._remoteHome(self.theTransactionUnderTest(1))
- self.assertTrue(home is not None)
- self.assertEqual(home.id(), home01.id())
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_prepare_home(self):
- """
- Test that L{prepareCalendarHome} creates a home.
- """
-
- # No home present
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home is None)
- yield self.commitTransaction(1)
-
- yield syncer.prepareCalendarHome()
-
- # Home is present
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home is not None)
- children = yield home.listChildren()
- self.assertEqual(len(children), 0)
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_prepare_home_external_txn(self):
- """
- Test that L{prepareCalendarHome} creates a home when an existing transaction is passed in.
- """
-
- # No home present
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home is None)
- yield self.commitTransaction(1)
-
- yield syncer.prepareCalendarHome(txn=self.theTransactionUnderTest(1))
- yield self.commitTransaction(1)
-
- # Home is present
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home is not None)
- children = yield home.listChildren()
- self.assertEqual(len(children), 0)
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_home_metadata(self):
- """
- Test that L{syncCalendarHomeMetaData} syncs home metadata correctly.
- """
-
- alarm_event_timed = """BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_event_timed
-TRIGGER:-PT10M
-END:VALARM
-"""
- alarm_event_allday = """BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_event_allday
-TRIGGER:-PT10M
-END:VALARM
-"""
- alarm_todo_timed = """BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_todo_timed
-TRIGGER:-PT10M
-END:VALARM
-"""
- alarm_todo_allday = """BEGIN:VALARM
-ACTION:DISPLAY
-DESCRIPTION:alarm_todo_allday
-TRIGGER:-PT10M
-END:VALARM
-"""
- availability = """BEGIN:VCALENDAR
-VERSION:2.0
-PRODID:-//Example Inc.//Example Calendar//EN
-BEGIN:VAVAILABILITY
-UID:20061005T133225Z-00001-availability@example.com
-DTSTART:20060101T000000Z
-DTEND:20060108T000000Z
-DTSTAMP:20061005T133225Z
-ORGANIZER:mailto:bernard@example.com
-BEGIN:AVAILABLE
-UID:20061005T133225Z-00001-A-availability@example.com
-DTSTART:20060102T090000Z
-DTEND:20060102T120000Z
-DTSTAMP:20061005T133225Z
-RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
-SUMMARY:Weekdays from 9:00 to 12:00
-END:AVAILABLE
-END:VAVAILABILITY
-END:VCALENDAR
-"""
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- events0 = yield home0.createChildWithName("events")
- yield home0.setDefaultCalendar(events0, "VEVENT")
- yield home0.setDefaultAlarm(alarm_event_timed, True, True)
- yield home0.setDefaultAlarm(alarm_event_allday, True, False)
- yield home0.setDefaultAlarm(alarm_todo_timed, False, True)
- yield home0.setDefaultAlarm(alarm_todo_allday, False, False)
- yield home0.setAvailability(Component.fromString(availability))
- yield self.commitTransaction(0)
-
- # Trigger sync
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.sync()
-
- # Home is present with correct metadata
- home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home1 is not None)
- calendar1 = yield home1.childWithName("calendar")
- events1 = yield home1.childWithName("events")
- tasks1 = yield home1.childWithName("tasks")
- self.assertFalse(home1.isDefaultCalendar(calendar1))
- self.assertTrue(home1.isDefaultCalendar(events1))
- self.assertTrue(home1.isDefaultCalendar(tasks1))
- self.assertEqual(home1.getDefaultAlarm(True, True), alarm_event_timed)
- self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
- self.assertEqual(home1.getDefaultAlarm(False, True), alarm_todo_timed)
- self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
- self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
- yield self.commitTransaction(1)
-
- # Make some changes
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("calendar")
- yield home0.setDefaultCalendar(calendar0, "VEVENT")
- yield home0.setDefaultAlarm(None, True, True)
- yield home0.setDefaultAlarm(None, False, True)
- yield self.commitTransaction(0)
-
- # Trigger sync again
- yield syncer.sync()
-
- # Home is present with correct metadata
- home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home1 is not None)
- calendar1 = yield home1.childWithName("calendar")
- events1 = yield home1.childWithName("events")
- tasks1 = yield home1.childWithName("tasks")
- self.assertTrue(home1.isDefaultCalendar(calendar1))
- self.assertFalse(home1.isDefaultCalendar(events1))
- self.assertTrue(home1.isDefaultCalendar(tasks1))
- self.assertEqual(home1.getDefaultAlarm(True, True), None)
- self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
- self.assertEqual(home1.getDefaultAlarm(False, True), None)
- self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
- self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_get_calendar_sync_list(self):
- """
- Test that L{getCalendarSyncList} returns the correct results.
- """
-
- yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.commitTransaction(0)
- home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01")
- self.assertTrue(home01 is not None)
- calendars01 = yield home01.loadChildren()
- results01 = {}
- for calendar in calendars01:
- if calendar.owned():
- sync_token = yield calendar.syncToken()
- results01[calendar.id()] = CalendarMigrationRecord.make(
- calendarHomeResourceID=home01.id(),
- remoteResourceID=calendar.id(),
- localResourceID=0,
- lastSyncToken=sync_token,
- )
-
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- results = yield syncer.getCalendarSyncList()
- self.assertEqual(results, results01)
-
-
- @inlineCallbacks
- def test_sync_calendar_initial_empty(self):
- """
- Test that L{syncCalendar} syncs an initially non-existent local calendar with
- an empty remote calendar.
- """
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("calendar")
- remote_id = calendar0.id()
- remote_sync_token = yield calendar0.syncToken()
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
-
- # No local calendar exists yet
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- children = yield home1.listChildren()
- self.assertEqual(len(children), 0)
- yield self.commitTransaction(1)
-
- # Trigger sync of the one calendar
- local_sync_state = {}
- remote_sync_state = {remote_id: CalendarMigrationRecord.make(
- calendarHomeResourceID=home0.id(),
- remoteResourceID=remote_id,
- localResourceID=0,
- lastSyncToken=remote_sync_token,
- )}
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
- self.assertEqual(len(local_sync_state), 1)
- self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
- # Local calendar exists
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- self.assertTrue(calendar1 is not None)
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_sync_calendar_initial_with_data(self):
- """
- Test that L{syncCalendar} syncs an initially non-existent local calendar with
- a remote calendar containing data, and that subsequent changes (modify, remove,
- add) are picked up by later syncs.
- """
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("calendar")
- o1 = yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
- o2 = yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
- o3 = yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
- remote_id = calendar0.id()
- mapping0 = dict([(o.name(), o.id()) for o in (o1, o2, o3)])
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
-
- # No local calendar exists yet
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- self.assertTrue(calendar1 is None)
- yield self.commitTransaction(1)
-
- # Trigger sync of the one calendar
- local_sync_state = {}
- remote_sync_state = yield syncer.getCalendarSyncList()
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
- self.assertEqual(len(local_sync_state), 1)
- self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
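- # Helper: check that the CALENDAR_OBJECT_MIGRATION table maps each remote resource id to the matching local resource id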
- @inlineCallbacks
- def _checkCalendarObjectMigrationState(home, mapping1):
- com = schema.CALENDAR_OBJECT_MIGRATION
- mappings = yield Select(
- columns=[com.REMOTE_RESOURCE_ID, com.LOCAL_RESOURCE_ID],
- From=com,
- Where=(com.CALENDAR_HOME_RESOURCE_ID == home.id())
- ).on(self.theTransactionUnderTest(1))
- expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
- self.assertEqual(dict(mappings), expected_mappings)
-
-
- # Local calendar exists
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- self.assertTrue(calendar1 is not None)
- children = yield calendar1.objectResources()
- self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
- mapping1 = dict([(o.name(), o.id()) for o in children])
- yield _checkCalendarObjectMigrationState(home1, mapping1)
- yield self.commitTransaction(1)
-
- # Change one resource
- object0 = yield self.calendarObjectUnderTest(
- txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics"
- )
- yield object0.setComponent(Component.fromString(self.caldata1_changed))
- yield self.commitTransaction(0)
-
- remote_sync_state = yield syncer.getCalendarSyncList()
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
-
- object1 = yield self.calendarObjectUnderTest(
- txn=self.theTransactionUnderTest(1), home="user01", status=_HOME_STATUS_MIGRATING, calendar_name="calendar", name="1.ics"
- )
- caldata = yield object1.component()
- self.assertEqual(normalize_iCalStr(caldata), normalize_iCalStr(self.caldata1_changed))
- yield self.commitTransaction(1)
-
- # Remove one resource
- object0 = yield self.calendarObjectUnderTest(
- txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="2.ics"
- )
- yield object0.remove()
- del mapping0["2.ics"]
- yield self.commitTransaction(0)
-
- remote_sync_state = yield syncer.getCalendarSyncList()
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
-
- calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="user01", status=_HOME_STATUS_MIGRATING, name="calendar")
- children = yield calendar1.objectResources()
- self.assertEqual(set([child.name() for child in children]), set(("1.ics", "3.ics",)))
- mapping1 = dict([(o.name(), o.id()) for o in children])
- yield _checkCalendarObjectMigrationState(home1, mapping1)
- yield self.commitTransaction(1)
-
- # Add one resource
- calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
- o4 = yield calendar0.createCalendarObjectWithName("4.ics", Component.fromString(self.caldata4))
- mapping0[o4.name()] = o4.id()
- yield self.commitTransaction(0)
-
- remote_sync_state = yield syncer.getCalendarSyncList()
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
-
- calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="user01", status=_HOME_STATUS_MIGRATING, name="calendar")
- children = yield calendar1.objectResources()
- self.assertEqual(set([child.name() for child in children]), set(("1.ics", "3.ics", "4.ics")))
- mapping1 = dict([(o.name(), o.id()) for o in children])
- yield _checkCalendarObjectMigrationState(home1, mapping1)
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_sync_calendars_add_remove(self):
- """
- Test that L{syncCalendarList} creates the initial set of local calendars, then
- picks up a calendar added on the remote side, and finally removes the local
- copy when the remote calendar is deleted.
- """
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- children0 = yield home0.loadChildren()
- details0 = dict([(child.id(), child.name()) for child in children0])
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
-
- # No local calendar exists yet
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- children1 = yield home1.loadChildren()
- self.assertEqual(len(children1), 0)
- yield self.commitTransaction(1)
-
- # Trigger sync
- yield syncer.syncCalendarList()
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- children1 = yield home1.loadChildren()
- details1 = dict([(child.id(), child.name()) for child in children1])
- self.assertEqual(set(details1.values()), set(details0.values()))
- yield self.commitTransaction(1)
-
- # Add a calendar
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- newcalendar0 = yield home0.createCalendarWithName("new-calendar")
- details0[newcalendar0.id()] = newcalendar0.name()
- yield self.commitTransaction(0)
-
- # Trigger sync
- yield syncer.syncCalendarList()
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- children1 = yield home1.loadChildren()
- details1 = dict([(child.id(), child.name()) for child in children1])
- self.assertTrue("new-calendar" in details1.values())
- self.assertEqual(set(details1.values()), set(details0.values()))
- yield self.commitTransaction(1)
-
- # Remove a calendar
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("new-calendar")
- del details0[calendar0.id()]
- yield calendar0.remove()
- yield self.commitTransaction(0)
-
- # Trigger sync
- yield syncer.syncCalendarList()
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- children1 = yield home1.loadChildren()
- details1 = dict([(child.id(), child.name()) for child in children1])
- self.assertTrue("new-calendar" not in details1.values())
- self.assertEqual(set(details1.values()), set(details0.values()))
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_sync_attachments_add_remove(self):
- """
- Test that L{syncAttachments} syncs attachment data, then an update to the data,
- and finally a removal of the data.
- """
-
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("calendar")
- yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
- yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
- yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
- remote_id = calendar0.id()
- mapping0 = dict()
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
-
- # Trigger sync of the one calendar
- local_sync_state = {}
- remote_sync_state = yield syncer.getCalendarSyncList()
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
- self.assertEqual(len(local_sync_state), 1)
- self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
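- # Helper: look up the local attachment id recorded for a given remote attachment id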
- @inlineCallbacks
- def _mapLocalIDToRemote(remote_id):
- records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
- yield self.commitTransaction(1)
- for record in records:
- if record.remoteResourceID == remote_id:
- returnValue(record.localResourceID)
- else:
- returnValue(None)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
- self.assertEqual(changed, set())
- self.assertEqual(removed, set())
-
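- # Helper: check that the ATTACHMENT_MIGRATION table maps each remote attachment id to the matching local attachment id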
- @inlineCallbacks
- def _checkAttachmentObjectMigrationState(home, mapping1):
- am = schema.ATTACHMENT_MIGRATION
- mappings = yield Select(
- columns=[am.REMOTE_RESOURCE_ID, am.LOCAL_RESOURCE_ID],
- From=am,
- Where=(am.CALENDAR_HOME_RESOURCE_ID == home.id())
- ).on(self.theTransactionUnderTest(1))
- expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
- self.assertEqual(dict(mappings), expected_mappings)
-
-
- # Local calendar exists
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- self.assertTrue(calendar1 is not None)
- children = yield calendar1.objectResources()
- self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
-
- attachments = yield home1.getAllAttachments()
- mapping1 = dict([(o.md5(), o.id()) for o in attachments])
- yield _checkAttachmentObjectMigrationState(home1, mapping1)
- yield self.commitTransaction(1)
-
- # Add one attachment
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
- attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1."))
- id0_1 = attachment.id()
- md50_1 = attachment.md5()
- managedid0_1 = attachment.managedID()
- mapping0[md50_1] = id0_1
- yield self.commitTransaction(0)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
- self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)),)))
- self.assertEqual(removed, set())
-
- # Validate changes
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- attachments = yield home1.getAllAttachments()
- mapping1 = dict([(o.md5(), o.id()) for o in attachments])
- yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
- # Add another attachment
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="2.ics")
- attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test2.txt", MemoryStream("Here is some text #2."))
- id0_2 = attachment.id()
- md50_2 = attachment.md5()
- mapping0[md50_2] = id0_2
- yield self.commitTransaction(0)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
- self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_2)),)))
- self.assertEqual(removed, set())
-
- # Validate changes
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- attachments = yield home1.getAllAttachments()
- mapping1 = dict([(o.md5(), o.id()) for o in attachments])
- yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
- # Change original attachment (this is actually a remove and a create all in one)
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
- attachment, _ignore_location = yield object1.updateAttachment(managedid0_1, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1 - changed."))
- del mapping0[md50_1]
- id0_1_changed = attachment.id()
- md50_1_changed = attachment.md5()
- managedid0_1_changed = attachment.managedID()
- mapping0[md50_1_changed] = id0_1_changed
- yield self.commitTransaction(0)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
- self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed)),)))
- self.assertEqual(removed, set((id0_1,)))
-
- # Validate changes
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- attachments = yield home1.getAllAttachments()
- mapping1 = dict([(o.md5(), o.id()) for o in attachments])
- yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
- # Add original to a different resource
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
- component = yield object1.componentForUser()
- attach = component.mainComponent().getProperty("ATTACH")
-
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="3.ics")
- component = yield object1.componentForUser()
- attach = component.mainComponent().addProperty(attach)
- yield object1.setComponent(component)
- yield self.commitTransaction(0)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
- self.assertEqual(changed, set())
- self.assertEqual(removed, set())
-
- # Validate changes
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- attachments = yield home1.getAllAttachments()
- mapping1 = dict([(o.md5(), o.id()) for o in attachments])
- yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
- # Change original attachment in original resource (this creates a new one and does not remove the old)
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
- attachment, _ignore_location = yield object1.updateAttachment(managedid0_1_changed, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1 - changed again."))
- id0_1_changed_again = attachment.id()
- md50_1_changed_again = attachment.md5()
- mapping0[md50_1_changed_again] = id0_1_changed_again
- yield self.commitTransaction(0)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
- self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed_again)),)))
- self.assertEqual(removed, set())
-
- # Validate changes
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- attachments = yield home1.getAllAttachments()
- mapping1 = dict([(o.md5(), o.id()) for o in attachments])
- yield _checkAttachmentObjectMigrationState(home1, mapping1)
-
-
- @inlineCallbacks
- def test_link_attachments(self):
- """
- Test that L{linkAttachments} links attachment data to the associated calendar object.
- """
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("calendar")
- object0_1 = yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
- object0_2 = yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
- yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
- remote_id = calendar0.id()
-
- attachment, _ignore_location = yield object0_1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1."))
- id0_1 = attachment.id()
- md50_1 = attachment.md5()
- managedid0_1 = attachment.managedID()
- pathID0_1 = ManagedAttachment.lastSegmentOfUriPath(managedid0_1, attachment.name())
-
- attachment, _ignore_location = yield object0_2.addAttachment(None, MimeType.fromString("text/plain"), "test2.txt", MemoryStream("Here is some text #2."))
- id0_2 = attachment.id()
- md50_2 = attachment.md5()
- managedid0_2 = attachment.managedID()
- pathID0_2 = ManagedAttachment.lastSegmentOfUriPath(managedid0_2, attachment.name())
-
- yield self.commitTransaction(0)
-
- # Add original to a different resource
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
- component = yield object1.componentForUser()
- attach = component.mainComponent().getProperty("ATTACH")
-
- object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="3.ics")
- component = yield object1.componentForUser()
- attach = component.mainComponent().addProperty(attach)
- yield object1.setComponent(component)
- yield self.commitTransaction(0)
-
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
-
- # Trigger sync of the one calendar
- local_sync_state = {}
- remote_sync_state = yield syncer.getCalendarSyncList()
- yield syncer.syncCalendar(
- remote_id,
- local_sync_state,
- remote_sync_state,
- )
- self.assertEqual(len(local_sync_state), 1)
- self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
-
- # Sync attachments
- changed, removed = yield syncer.syncAttachments()
-
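- # Helper: look up the local attachment id recorded for a given remote attachment id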
- @inlineCallbacks
- def _mapLocalIDToRemote(remote_id):
- records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
- yield self.commitTransaction(1)
- for record in records:
- if record.remoteResourceID == remote_id:
- returnValue(record.localResourceID)
- else:
- returnValue(None)
-
- self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)), (yield _mapLocalIDToRemote(id0_2)),)))
- self.assertEqual(removed, set())
-
- # Link attachments (after home is disabled)
- yield syncer.disableRemoteHome()
- len_links = yield syncer.linkAttachments()
- self.assertEqual(len_links, 3)
-
- # Local calendar exists
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- self.assertTrue(calendar1 is not None)
- children = yield calendar1.objectResources()
- self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
-
- # Make sure calendar object is associated with attachment
- object1 = yield calendar1.objectResourceWithName("1.ics")
- attachments = yield object1.managedAttachmentList()
- self.assertEqual(attachments, [pathID0_1, ])
-
- attachment = yield object1.attachmentWithManagedID(managedid0_1)
- self.assertTrue(attachment is not None)
- self.assertEqual(attachment.md5(), md50_1)
-
- # Make sure calendar object is associated with attachment
- object1 = yield calendar1.objectResourceWithName("2.ics")
- attachments = yield object1.managedAttachmentList()
- self.assertEqual(attachments, [pathID0_2, ])
-
- attachment = yield object1.attachmentWithManagedID(managedid0_2)
- self.assertTrue(attachment is not None)
- self.assertEqual(attachment.md5(), md50_2)
-
- # Make sure calendar object is associated with attachment
- object1 = yield calendar1.objectResourceWithName("3.ics")
- attachments = yield object1.managedAttachmentList()
- self.assertEqual(attachments, [pathID0_1, ])
-
- attachment = yield object1.attachmentWithManagedID(managedid0_1)
- self.assertTrue(attachment is not None)
- self.assertEqual(attachment.md5(), md50_1)
-
-
- @inlineCallbacks
- def test_delegate_reconcile(self):
- """
- Test that L{delegateReconcile} copies over the full set of delegates and caches associated groups.
- """
-
- # Create remote home
- yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.commitTransaction(0)
-
- # Add some delegates
- txn = self.theTransactionUnderTest(0)
- record01 = yield txn.directoryService().recordWithUID(u"user01")
- record02 = yield txn.directoryService().recordWithUID(u"user02")
- record03 = yield txn.directoryService().recordWithUID(u"user03")
-
- group01 = yield txn.directoryService().recordWithUID(u"__top_group_1__")
- group02 = yield txn.directoryService().recordWithUID(u"right_coast")
-
- # Add user02 and user03 as individual delegates
- yield Delegates.addDelegate(txn, record01, record02, True)
- yield Delegates.addDelegate(txn, record01, record03, False)
-
- # Add group delegates
- yield Delegates.addDelegate(txn, record01, group01, True)
- yield Delegates.addDelegate(txn, record01, group02, False)
-
- # Add external delegates
- yield txn.assignExternalDelegates(u"user01", None, None, u"external1", u"external2")
-
- yield self.commitTransaction(0)
-
-
- # Initially no local delegates
- txn = self.theTransactionUnderTest(1)
- delegates = yield txn.dumpIndividualDelegatesLocal(u"user01")
- self.assertEqual(len(delegates), 0)
- delegates = yield txn.dumpGroupDelegatesLocal(u"user04")
- self.assertEqual(len(delegates), 0)
- externals = yield txn.dumpExternalDelegatesLocal(u"user01")
- self.assertEqual(len(externals), 0)
- yield self.commitTransaction(1)
-
- # Sync from remote side
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.disableRemoteHome()
- yield syncer.delegateReconcile()
-
- # Now have local delegates
- txn = self.theTransactionUnderTest(1)
-
- delegates = yield txn.dumpIndividualDelegatesLocal(u"user01")
- self.assertEqual(
- set(delegates),
- set((
- DelegateRecord.make(delegator="user01", delegate="user02", readWrite=1),
- DelegateRecord.make(delegator="user01", delegate="user03", readWrite=0),
- )),
- )
-
- delegateGroups = yield txn.dumpGroupDelegatesLocal(u"user01")
- group_top = yield txn.groupByUID(u"__top_group_1__")
- group_right = yield txn.groupByUID(u"right_coast")
- self.assertEqual(
- set([item[0] for item in delegateGroups]),
- set((
- DelegateGroupsRecord.make(delegator="user01", groupID=group_top.groupID, readWrite=1, isExternal=False),
- DelegateGroupsRecord.make(delegator="user01", groupID=group_right.groupID, readWrite=0, isExternal=False),
- )),
- )
-
- externals = yield txn.dumpExternalDelegatesLocal(u"user01")
- self.assertEqual(
- set(externals),
- set((
- ExternalDelegateGroupsRecord.make(
- delegator="user01",
- groupUIDRead="external1",
- groupUIDWrite="external2",
- ),
- )),
- )
-
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_notifications_reconcile(self):
- """
- Test that L{notificationsReconcile} copies over the full set of notification objects.
- """
-
- # Create remote home - and add some fake notifications
- yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- notifications = yield self.theTransactionUnderTest(0).notificationsWithUID("user01", create=True)
- uid1 = str(uuid4())
- obj1 = yield notifications.writeNotificationObject(uid1, "type1", "data1")
- id1 = obj1.id()
- uid2 = str(uuid4())
- obj2 = yield notifications.writeNotificationObject(uid2, "type2", "data2")
- id2 = obj2.id()
- yield self.commitTransaction(0)
-
- # Sync from remote side
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
- yield syncer.disableRemoteHome()
- changes = yield syncer.notificationsReconcile()
- self.assertEqual(changes, 2)
-
- # Now have local notifications
- notifications = yield NotificationCollection.notificationsWithUID(
- self.theTransactionUnderTest(1),
- "user01",
- status=_HOME_STATUS_MIGRATING,
- )
- results = yield notifications.notificationObjects()
- self.assertEqual(len(results), 2)
- for result in results:
- for test_uid, test_id, test_type, test_data in ((uid1, id1, "type1", "data1",), (uid2, id2, "type2", "data2",),):
- if result.uid() == test_uid:
- self.assertNotEqual(result.id(), test_id)
- self.assertEqual(json.loads(result.notificationType()), test_type)
- data = yield result.notificationData()
- self.assertEqual(json.loads(data), test_data)
- break
- else:
- self.fail("Notification uid {} not found".format(result.uid()))
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_disable_remote_home(self):
- """
- Test that L{disableRemoteHome} changes the remote status and prevents a normal state
- home from being created.
- """
-
- # Create remote home - and add some fake notifications
- yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.theTransactionUnderTest(0).notificationsWithUID("user01", create=True)
- yield self.commitTransaction(0)
-
- # Sync from remote side
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.prepareCalendarHome()
- yield syncer.disableRemoteHome()
-
- # It is disabled
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01")
- self.assertTrue(home is None)
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
- self.assertTrue(home is None)
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
- self.assertTrue(home is not None)
- yield self.commitTransaction(0)
-
-
-
-class TestSharingSync(MultiStoreConduitTest):
- """
- Test that L{CrossPodHomeSync} sharing sync works.
- """
-
- @inlineCallbacks
- def setUp(self):
- self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
- self.augments = FilePath(__file__).sibling("accounts").child("augments.xml")
- yield super(TestSharingSync, self).setUp()
- yield self.populate()
-
-
- def configure(self):
- super(TestSharingSync, self).configure()
- config.Sharing.Enabled = True
- config.Sharing.Calendars.Enabled = True
- config.Sharing.Calendars.Groups.Enabled = True
- config.Sharing.Calendars.Groups.ReconciliationDelaySeconds = 0
-
-
- @inlineCallbacks
- def populate(self):
- yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
-
- requirements = {
- "user01" : None,
- "user02" : None,
- "user06" : None,
- "user07" : None,
- "user08" : None,
- "user09" : None,
- "user10" : None,
- }
-
-
- @inlineCallbacks
- def _createShare(self, shareFrom, shareTo, accept=True):
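- # Accounts whose uid starts with "p" (the puser accounts) live on pod 1; all others live on pod 0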
- # Invite
- txnindex = 1 if shareFrom[0] == "p" else 0
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
- calendar = yield home.childWithName("calendar")
- shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, "summary")
- yield self.commitTransaction(txnindex)
-
- # Accept
- if accept:
- inviteUID = shareeView.shareUID()
- txnindex = 1 if shareTo[0] == "p" else 0
- shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
- shareeView = yield shareeHome.acceptShare(inviteUID)
- sharedName = shareeView.name()
- yield self.commitTransaction(txnindex)
- else:
- sharedName = None
-
- returnValue(sharedName)
-
-
- @inlineCallbacks
- def test_shared_collections_reconcile(self):
- """
- Test that L{sharedByCollectionsReconcile} and L{sharedToCollectionsReconcile} copy over the full set of shares made by and to the migrating user.
- """
-
- # Create home
- yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.commitTransaction(0)
-
- # Shared by migrating user
- shared_name_02 = yield self._createShare("user01", "user02")
- shared_name_03 = yield self._createShare("user01", "puser03")
-
- # Shared to migrating user
- shared_name_04 = yield self._createShare("user04", "user01")
- shared_name_05 = yield self._createShare("puser05", "user01")
-
- # Sync from remote side
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.sync()
- yield syncer.disableRemoteHome()
- changes = yield syncer.sharedByCollectionsReconcile()
- self.assertEqual(changes, 2)
- changes = yield syncer.sharedToCollectionsReconcile()
- self.assertEqual(changes, 2)
-
- # Local calendar exists with shares
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- invites1 = yield calendar1.sharingInvites()
- self.assertEqual(len(invites1), 2)
- self.assertEqual(set([invite.uid for invite in invites1]), set((shared_name_02, shared_name_03,)))
- yield self.commitTransaction(1)
-
- # Remote sharee can access it
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user02")
- calendar0 = yield home0.childWithName(shared_name_02)
- self.assertTrue(calendar0 is not None)
-
- # Local sharee can access it
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser03")
- calendar1 = yield home1.childWithName(shared_name_03)
- self.assertTrue(calendar1 is not None)
-
- # Local shared calendars exist
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName(shared_name_04)
- self.assertTrue(calendar1 is not None)
- calendar1 = yield home1.childWithName(shared_name_05)
- self.assertTrue(calendar1 is not None)
- yield self.commitTransaction(1)
-
- # Sharers see migrated user as sharee
- externalHome0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
- calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user04", name="calendar")
- invites = yield calendar0.allInvitations()
- self.assertEqual(len(invites), 1)
- self.assertEqual(invites[0].shareeUID, "user01")
- self.assertEqual(invites[0].shareeHomeID, externalHome0.id())
- yield self.commitTransaction(0)
-
- shareeHome1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser05", name="calendar")
- invites = yield calendar1.allInvitations()
- self.assertEqual(len(invites), 1)
- self.assertEqual(invites[0].shareeUID, "user01")
- self.assertEqual(invites[0].shareeHomeID, shareeHome1.id())
- yield self.commitTransaction(1)
-
-
- @inlineCallbacks
- def test_group_shared_collections_reconcile(self):
- """
- Test that L{sharedByCollectionsReconcile} copies over a share made to a group and caches the associated group record.
- """
-
- # Create home
- yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- yield self.commitTransaction(0)
-
- # Shared by migrating user
- yield self._createShare("user01", "group02", accept=False)
-
- # Sync from remote side
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.loadRecord()
- yield syncer.sync()
- yield syncer.disableRemoteHome()
- changes = yield syncer.sharedByCollectionsReconcile()
- self.assertEqual(changes, 3)
- changes = yield syncer.sharedToCollectionsReconcile()
- self.assertEqual(changes, 0)
-
- # Local calendar exists with shares
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- invites1 = yield calendar1.sharingInvites()
- self.assertEqual(len(invites1), 3)
- sharee = yield GroupShareeRecord.querysimple(self.theTransactionUnderTest(1), calendarID=calendar1.id())
- self.assertEqual(len(sharee), 1)
- group = yield GroupsRecord.querysimple(self.theTransactionUnderTest(1), groupID=sharee[0].groupID)
- self.assertEqual(len(group), 1)
- self.assertEqual(group[0].groupUID, "group02")
- yield self.commitTransaction(1)
-
-
-
-class TestGroupAttendeeSync(MultiStoreConduitTest):
- """
- GroupAttendeeReconciliation tests
- """
-
- now = {"now1": DateTime.getToday().getYear() + 1}
-
- groupdata1 = """BEGIN:VCALENDAR
-CALSCALE:GREGORIAN
-PRODID:-//Example Inc.//Example Calendar//EN
-VERSION:2.0
-BEGIN:VEVENT
-DTSTAMP:20051222T205953Z
-CREATED:20060101T150000Z
-DTSTART:{now1:04d}0101T100000Z
-DURATION:PT1H
-SUMMARY:event 1
-UID:event1@ninevah.local
-END:VEVENT
-END:VCALENDAR""".format(**now)
-
- groupdata2 = """BEGIN:VCALENDAR
-CALSCALE:GREGORIAN
-PRODID:-//Example Inc.//Example Calendar//EN
-VERSION:2.0
-BEGIN:VEVENT
-DTSTAMP:20051222T205953Z
-CREATED:20060101T150000Z
-DTSTART:{now1:04d}0101T100000Z
-DURATION:PT1H
-SUMMARY:event 2
-UID:event2@ninevah.local
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:group02@example.com
-END:VEVENT
-END:VCALENDAR""".format(**now)
-
- groupdata3 = """BEGIN:VCALENDAR
-CALSCALE:GREGORIAN
-PRODID:-//Example Inc.//Example Calendar//EN
-VERSION:2.0
-BEGIN:VEVENT
-DTSTAMP:20051222T205953Z
-CREATED:20060101T150000Z
-DTSTART:{now1:04d}0101T100000Z
-DURATION:PT1H
-SUMMARY:event 3
-UID:event3@ninevah.local
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:group04@example.com
-END:VEVENT
-END:VCALENDAR""".format(**now)
-
- @inlineCallbacks
- def setUp(self):
- self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
- yield super(TestGroupAttendeeSync, self).setUp()
- yield self.populate()
-
-
- def configure(self):
- super(TestGroupAttendeeSync, self).configure()
- config.GroupAttendees.Enabled = True
- config.GroupAttendees.ReconciliationDelaySeconds = 0
- config.GroupAttendees.AutoUpdateSecondsFromNow = 0
-
-
- @inlineCallbacks
- def populate(self):
- yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
-
- requirements = {
- "user01" : None,
- "user02" : None,
- "user06" : None,
- "user07" : None,
- "user08" : None,
- "user09" : None,
- "user10" : None,
- }
-
- @inlineCallbacks
- def test_group_attendees(self):
- """
- Test that L{groupAttendeeReconcile} links group attendees to the associated calendar objects.
- """
-
- home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar0 = yield home0.childWithName("calendar")
- yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.groupdata1))
- yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.groupdata2))
- yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.groupdata3))
- yield self.commitTransaction(0)
-
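- # Let the queued group attendee reconciliation jobs finish before triggering the cross-pod sync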
- yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60.0)
-
- # Trigger sync
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.sync()
-
- # Link groups
- len_links = yield syncer.groupAttendeeReconcile()
- self.assertEqual(len_links, 2)
-
- # Local calendar exists
- home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- calendar1 = yield home1.childWithName("calendar")
- self.assertTrue(calendar1 is not None)
- children = yield calendar1.objectResources()
- self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
-
- object2 = yield calendar1.objectResourceWithName("2.ics")
- record = (yield object2.groupEventLinks()).values()[0]
- group02 = yield self.theTransactionUnderTest(1).groupByUID(u"group02")
- self.assertEqual(record.groupID, group02.groupID)
- self.assertEqual(record.membershipHash, group02.membershipHash)
-
- object3 = yield calendar1.objectResourceWithName("3.ics")
- record = (yield object3.groupEventLinks()).values()[0]
- group04 = yield self.theTransactionUnderTest(1).groupByUID(u"group04")
- self.assertEqual(record.groupID, group04.groupID)
- self.assertEqual(record.membershipHash, group04.membershipHash)
</del></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_home_syncpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtesttest_home_syncpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_home_sync.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1307 @@
</span><ins>+##
+# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from pycalendar.datetime import DateTime
+from twext.enterprise.dal.syntax import Select
+from twext.enterprise.jobqueue import JobItem
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.filepath import FilePath
+from twistedcaldav.config import config
+from twistedcaldav.ical import Component, normalize_iCalStr
+from txdav.caldav.datastore.sql import ManagedAttachment
+from txdav.caldav.datastore.sql_directory import GroupShareeRecord
+from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
+from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
+ AttachmentMigrationRecord
+from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_directory import DelegateRecord, \
+ ExternalDelegateGroupsRecord, DelegateGroupsRecord, GroupsRecord
+from txdav.common.datastore.sql_notification import NotificationCollection
+from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_EXTERNAL, \
+ _BIND_MODE_READ, _HOME_STATUS_MIGRATING, _HOME_STATUS_NORMAL, \
+ _HOME_STATUS_DISABLED
+from txdav.common.datastore.test.util import populateCalendarsFrom
+from txdav.who.delegates import Delegates
+from txweb2.http_headers import MimeType
+from txweb2.stream import MemoryStream
+from uuid import uuid4
+import json
+
+
+class TestCrossPodHomeSync(MultiStoreConduitTest):
+ """
+ Test that L{CrossPodHomeSync} works.
+ """
+
+ nowYear = {"now": DateTime.getToday().getYear()}
+
+ caldata1 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid1
+DTSTART:{now:04d}0102T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**nowYear)
+
+ caldata1_changed = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid1
+DTSTART:{now:04d}0102T150000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance changed
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**nowYear)
+
+ caldata2 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid2
+DTSTART:{now:04d}0102T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**nowYear)
+
+ caldata3 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid3
+DTSTART:{now:04d}0102T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**nowYear)
+
+ caldata4 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid4
+DTSTART:{now:04d}0102T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=DAILY
+SUMMARY:instance
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**nowYear)
+
+
+ @inlineCallbacks
+ def test_remote_home(self):
+ """
+ Test that a remote home can be accessed.
+ """
+
+ home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ self.assertTrue(home01 is not None)
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ home = yield syncer._remoteHome(self.theTransactionUnderTest(1))
+ self.assertTrue(home is not None)
+ self.assertEqual(home.id(), home01.id())
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_prepare_home(self):
+ """
+ Test that L{prepareCalendarHome} creates a home.
+ """
+
+ # No home present
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home is None)
+ yield self.commitTransaction(1)
+
+ yield syncer.prepareCalendarHome()
+
+ # Home is present
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home is not None)
+ children = yield home.listChildren()
+ self.assertEqual(len(children), 0)
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_prepare_home_external_txn(self):
+ """
+ Test that L{prepareCalendarHome} creates a home when an existing transaction is passed in.
+ """
+
+ # No home present
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home is None)
+ yield self.commitTransaction(1)
+
+ yield syncer.prepareCalendarHome(txn=self.theTransactionUnderTest(1))
+ yield self.commitTransaction(1)
+
+ # Home is present
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home is not None)
+ children = yield home.listChildren()
+ self.assertEqual(len(children), 0)
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_home_metadata(self):
+ """
+ Test that L{syncCalendarHomeMetaData} syncs home metadata correctly.
+ """
+
+ alarm_event_timed = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_event_timed
+TRIGGER:-PT10M
+END:VALARM
+"""
+ alarm_event_allday = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_event_allday
+TRIGGER:-PT10M
+END:VALARM
+"""
+ alarm_todo_timed = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_todo_timed
+TRIGGER:-PT10M
+END:VALARM
+"""
+ alarm_todo_allday = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_todo_allday
+TRIGGER:-PT10M
+END:VALARM
+"""
+ availability = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Example Inc.//Example Calendar//EN
+BEGIN:VAVAILABILITY
+UID:20061005T133225Z-00001-availability@example.com
+DTSTART:20060101T000000Z
+DTEND:20060108T000000Z
+DTSTAMP:20061005T133225Z
+ORGANIZER:mailto:bernard@example.com
+BEGIN:AVAILABLE
+UID:20061005T133225Z-00001-A-availability@example.com
+DTSTART:20060102T090000Z
+DTEND:20060102T120000Z
+DTSTAMP:20061005T133225Z
+RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+SUMMARY:Weekdays from 9:00 to 12:00
+END:AVAILABLE
+END:VAVAILABILITY
+END:VCALENDAR
+"""
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ events0 = yield home0.createChildWithName("events")
+ yield home0.setDefaultCalendar(events0, "VEVENT")
+ yield home0.setDefaultAlarm(alarm_event_timed, True, True)
+ yield home0.setDefaultAlarm(alarm_event_allday, True, False)
+ yield home0.setDefaultAlarm(alarm_todo_timed, False, True)
+ yield home0.setDefaultAlarm(alarm_todo_allday, False, False)
+ yield home0.setAvailability(Component.fromString(availability))
+ yield self.commitTransaction(0)
+
+ # Trigger sync
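+        # A full sync() pass is expected to carry the default calendar choice, the
+        # four default alarms and the availability data over to the migrating home.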
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.sync()
+
+ # Home is present with correct metadata
+ home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home1 is not None)
+ calendar1 = yield home1.childWithName("calendar")
+ events1 = yield home1.childWithName("events")
+ tasks1 = yield home1.childWithName("tasks")
+ self.assertFalse(home1.isDefaultCalendar(calendar1))
+ self.assertTrue(home1.isDefaultCalendar(events1))
+ self.assertTrue(home1.isDefaultCalendar(tasks1))
+ self.assertEqual(home1.getDefaultAlarm(True, True), alarm_event_timed)
+ self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
+ self.assertEqual(home1.getDefaultAlarm(False, True), alarm_todo_timed)
+ self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
+ self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
+ yield self.commitTransaction(1)
+
+ # Make some changes
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("calendar")
+ yield home0.setDefaultCalendar(calendar0, "VEVENT")
+ yield home0.setDefaultAlarm(None, True, True)
+ yield home0.setDefaultAlarm(None, False, True)
+ yield self.commitTransaction(0)
+
+ # Trigger sync again
+ yield syncer.sync()
+
+ # Home is present with correct metadata
+ home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home1 is not None)
+ calendar1 = yield home1.childWithName("calendar")
+ events1 = yield home1.childWithName("events")
+ tasks1 = yield home1.childWithName("tasks")
+ self.assertTrue(home1.isDefaultCalendar(calendar1))
+ self.assertFalse(home1.isDefaultCalendar(events1))
+ self.assertTrue(home1.isDefaultCalendar(tasks1))
+ self.assertEqual(home1.getDefaultAlarm(True, True), None)
+ self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
+ self.assertEqual(home1.getDefaultAlarm(False, True), None)
+ self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
+ self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_get_calendar_sync_list(self):
+ """
+ Test that L{getCalendarSyncList} returns the correct results.
+ """
+
+ yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.commitTransaction(0)
+ home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01")
+ self.assertTrue(home01 is not None)
+ calendars01 = yield home01.loadChildren()
+ results01 = {}
+ for calendar in calendars01:
+ if calendar.owned():
+ sync_token = yield calendar.syncToken()
+ results01[calendar.id()] = CalendarMigrationRecord.make(
+ calendarHomeResourceID=home01.id(),
+ remoteResourceID=calendar.id(),
+ localResourceID=0,
+ lastSyncToken=sync_token,
+ )
+
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ results = yield syncer.getCalendarSyncList()
+ self.assertEqual(results, results01)
+
+
+ @inlineCallbacks
+ def test_sync_calendar_initial_empty(self):
+ """
+ Test that L{syncCalendar} syncs an initially non-existent local calendar with
+ an empty remote calendar.
+ """
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("calendar")
+ remote_id = calendar0.id()
+ remote_sync_token = yield calendar0.syncToken()
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+
+ # No local calendar exists yet
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ children = yield home1.listChildren()
+ self.assertEqual(len(children), 0)
+ yield self.commitTransaction(1)
+
+ # Trigger sync of the one calendar
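+        # Starting with an empty local sync state forces syncCalendar() to create
+        # the local calendar; the remote state record supplies the remote resource
+        # id and sync token that should be recorded locally once the sync is done.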
+ local_sync_state = {}
+ remote_sync_state = {remote_id: CalendarMigrationRecord.make(
+ calendarHomeResourceID=home0.id(),
+ remoteResourceID=remote_id,
+ localResourceID=0,
+ lastSyncToken=remote_sync_token,
+ )}
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+ self.assertEqual(len(local_sync_state), 1)
+ self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
+ # Local calendar exists
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ self.assertTrue(calendar1 is not None)
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_sync_calendar_initial_with_data(self):
+ """
+ Test that L{syncCalendar} syncs an initially non-existent local calendar with
+        a remote calendar containing data. Also check that a change to, removal of,
+        and addition of events are synced on subsequent passes.
+ """
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("calendar")
+ o1 = yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
+ o2 = yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
+ o3 = yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
+ remote_id = calendar0.id()
+ mapping0 = dict([(o.name(), o.id()) for o in (o1, o2, o3)])
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+
+ # No local calendar exists yet
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ self.assertTrue(calendar1 is None)
+ yield self.commitTransaction(1)
+
+ # Trigger sync of the one calendar
+ local_sync_state = {}
+ remote_sync_state = yield syncer.getCalendarSyncList()
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+ self.assertEqual(len(local_sync_state), 1)
+ self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
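+        # Nested helper: verifies that the CALENDAR_OBJECT_MIGRATION table on pod 1
+        # maps each remote object id to the id of its migrated local counterpart.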
+ @inlineCallbacks
+ def _checkCalendarObjectMigrationState(home, mapping1):
+ com = schema.CALENDAR_OBJECT_MIGRATION
+ mappings = yield Select(
+ columns=[com.REMOTE_RESOURCE_ID, com.LOCAL_RESOURCE_ID],
+ From=com,
+ Where=(com.CALENDAR_HOME_RESOURCE_ID == home.id())
+ ).on(self.theTransactionUnderTest(1))
+ expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
+ self.assertEqual(dict(mappings), expected_mappings)
+
+
+ # Local calendar exists
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ self.assertTrue(calendar1 is not None)
+ children = yield calendar1.objectResources()
+ self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
+ mapping1 = dict([(o.name(), o.id()) for o in children])
+ yield _checkCalendarObjectMigrationState(home1, mapping1)
+ yield self.commitTransaction(1)
+
+ # Change one resource
+ object0 = yield self.calendarObjectUnderTest(
+ txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics"
+ )
+ yield object0.setComponent(Component.fromString(self.caldata1_changed))
+ yield self.commitTransaction(0)
+
+ remote_sync_state = yield syncer.getCalendarSyncList()
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+
+ object1 = yield self.calendarObjectUnderTest(
+ txn=self.theTransactionUnderTest(1), home="user01", status=_HOME_STATUS_MIGRATING, calendar_name="calendar", name="1.ics"
+ )
+ caldata = yield object1.component()
+ self.assertEqual(normalize_iCalStr(caldata), normalize_iCalStr(self.caldata1_changed))
+ yield self.commitTransaction(1)
+
+ # Remove one resource
+ object0 = yield self.calendarObjectUnderTest(
+ txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="2.ics"
+ )
+ yield object0.remove()
+ del mapping0["2.ics"]
+ yield self.commitTransaction(0)
+
+ remote_sync_state = yield syncer.getCalendarSyncList()
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+
+ calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="user01", status=_HOME_STATUS_MIGRATING, name="calendar")
+ children = yield calendar1.objectResources()
+ self.assertEqual(set([child.name() for child in children]), set(("1.ics", "3.ics",)))
+ mapping1 = dict([(o.name(), o.id()) for o in children])
+ yield _checkCalendarObjectMigrationState(home1, mapping1)
+ yield self.commitTransaction(1)
+
+ # Add one resource
+ calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
+ o4 = yield calendar0.createCalendarObjectWithName("4.ics", Component.fromString(self.caldata4))
+ mapping0[o4.name()] = o4.id()
+ yield self.commitTransaction(0)
+
+ remote_sync_state = yield syncer.getCalendarSyncList()
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+
+ calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="user01", status=_HOME_STATUS_MIGRATING, name="calendar")
+ children = yield calendar1.objectResources()
+ self.assertEqual(set([child.name() for child in children]), set(("1.ics", "3.ics", "4.ics")))
+ mapping1 = dict([(o.name(), o.id()) for o in children])
+ yield _checkCalendarObjectMigrationState(home1, mapping1)
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_sync_calendars_add_remove(self):
+ """
+        Test that L{syncCalendarList} syncs the full set of calendars in the home,
+        including calendars added to or removed from the remote home after the
+        initial sync.
+ """
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ children0 = yield home0.loadChildren()
+ details0 = dict([(child.id(), child.name()) for child in children0])
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+
+ # No local calendar exists yet
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ children1 = yield home1.loadChildren()
+ self.assertEqual(len(children1), 0)
+ yield self.commitTransaction(1)
+
+ # Trigger sync
+ yield syncer.syncCalendarList()
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ children1 = yield home1.loadChildren()
+ details1 = dict([(child.id(), child.name()) for child in children1])
+ self.assertEqual(set(details1.values()), set(details0.values()))
+ yield self.commitTransaction(1)
+
+ # Add a calendar
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ newcalendar0 = yield home0.createCalendarWithName("new-calendar")
+ details0[newcalendar0.id()] = newcalendar0.name()
+ yield self.commitTransaction(0)
+
+ # Trigger sync
+ yield syncer.syncCalendarList()
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ children1 = yield home1.loadChildren()
+ details1 = dict([(child.id(), child.name()) for child in children1])
+ self.assertTrue("new-calendar" in details1.values())
+ self.assertEqual(set(details1.values()), set(details0.values()))
+ yield self.commitTransaction(1)
+
+ # Remove a calendar
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("new-calendar")
+ del details0[calendar0.id()]
+ yield calendar0.remove()
+ yield self.commitTransaction(0)
+
+ # Trigger sync
+ yield syncer.syncCalendarList()
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ children1 = yield home1.loadChildren()
+ details1 = dict([(child.id(), child.name()) for child in children1])
+ self.assertTrue("new-calendar" not in details1.values())
+ self.assertEqual(set(details1.values()), set(details0.values()))
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_sync_attachments_add_remove(self):
+ """
+ Test that L{syncAttachments} syncs attachment data, then an update to the data,
+ and finally a removal of the data.
+ """
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("calendar")
+ yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
+ yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
+ yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
+ remote_id = calendar0.id()
+ mapping0 = dict()
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+
+ # Trigger sync of the one calendar
+ local_sync_state = {}
+ remote_sync_state = yield syncer.getCalendarSyncList()
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+ self.assertEqual(len(local_sync_state), 1)
+ self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
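+        # Nested helper: looks up the local attachment id that the
+        # AttachmentMigrationRecord entries on pod 1 recorded for a given remote
+        # attachment id (None if no record exists).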
+ @inlineCallbacks
+ def _mapLocalIDToRemote(remote_id):
+ records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
+ yield self.commitTransaction(1)
+ for record in records:
+ if record.remoteResourceID == remote_id:
+ returnValue(record.localResourceID)
+ else:
+ returnValue(None)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+ self.assertEqual(changed, set())
+ self.assertEqual(removed, set())
+
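+        # Nested helper: verifies that the ATTACHMENT_MIGRATION table on pod 1 maps
+        # each remote attachment id to the id of its migrated local copy.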
+ @inlineCallbacks
+ def _checkAttachmentObjectMigrationState(home, mapping1):
+ am = schema.ATTACHMENT_MIGRATION
+ mappings = yield Select(
+ columns=[am.REMOTE_RESOURCE_ID, am.LOCAL_RESOURCE_ID],
+ From=am,
+ Where=(am.CALENDAR_HOME_RESOURCE_ID == home.id())
+ ).on(self.theTransactionUnderTest(1))
+ expected_mappings = dict([(mapping0[name], mapping1[name]) for name in mapping0.keys()])
+ self.assertEqual(dict(mappings), expected_mappings)
+
+
+ # Local calendar exists
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ self.assertTrue(calendar1 is not None)
+ children = yield calendar1.objectResources()
+ self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
+
+ attachments = yield home1.getAllAttachments()
+ mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+ yield _checkAttachmentObjectMigrationState(home1, mapping1)
+ yield self.commitTransaction(1)
+
+ # Add one attachment
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1."))
+ id0_1 = attachment.id()
+ md50_1 = attachment.md5()
+ managedid0_1 = attachment.managedID()
+ mapping0[md50_1] = id0_1
+ yield self.commitTransaction(0)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+ self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)),)))
+ self.assertEqual(removed, set())
+
+ # Validate changes
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ attachments = yield home1.getAllAttachments()
+ mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+ yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+ # Add another attachment
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="2.ics")
+ attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test2.txt", MemoryStream("Here is some text #2."))
+ id0_2 = attachment.id()
+ md50_2 = attachment.md5()
+ mapping0[md50_2] = id0_2
+ yield self.commitTransaction(0)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+ self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_2)),)))
+ self.assertEqual(removed, set())
+
+ # Validate changes
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ attachments = yield home1.getAllAttachments()
+ mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+ yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+ # Change original attachment (this is actually a remove and a create all in one)
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ attachment, _ignore_location = yield object1.updateAttachment(managedid0_1, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1 - changed."))
+ del mapping0[md50_1]
+ id0_1_changed = attachment.id()
+ md50_1_changed = attachment.md5()
+ managedid0_1_changed = attachment.managedID()
+ mapping0[md50_1_changed] = id0_1_changed
+ yield self.commitTransaction(0)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+ self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed)),)))
+ self.assertEqual(removed, set((id0_1,)))
+
+ # Validate changes
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ attachments = yield home1.getAllAttachments()
+ mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+ yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+ # Add original to a different resource
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ component = yield object1.componentForUser()
+ attach = component.mainComponent().getProperty("ATTACH")
+
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="3.ics")
+ component = yield object1.componentForUser()
+ attach = component.mainComponent().addProperty(attach)
+ yield object1.setComponent(component)
+ yield self.commitTransaction(0)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+ self.assertEqual(changed, set())
+ self.assertEqual(removed, set())
+
+ # Validate changes
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ attachments = yield home1.getAllAttachments()
+ mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+ yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+ # Change original attachment in original resource (this creates a new one and does not remove the old)
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ attachment, _ignore_location = yield object1.updateAttachment(managedid0_1_changed, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1 - changed again."))
+ id0_1_changed_again = attachment.id()
+ md50_1_changed_again = attachment.md5()
+ mapping0[md50_1_changed_again] = id0_1_changed_again
+ yield self.commitTransaction(0)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+ self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed_again)),)))
+ self.assertEqual(removed, set())
+
+ # Validate changes
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ attachments = yield home1.getAllAttachments()
+ mapping1 = dict([(o.md5(), o.id()) for o in attachments])
+ yield _checkAttachmentObjectMigrationState(home1, mapping1)
+
+
+ @inlineCallbacks
+ def test_link_attachments(self):
+ """
+ Test that L{linkAttachments} links attachment data to the associated calendar object.
+ """
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("calendar")
+ object0_1 = yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
+ object0_2 = yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
+ yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
+ remote_id = calendar0.id()
+
+ attachment, _ignore_location = yield object0_1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1."))
+ id0_1 = attachment.id()
+ md50_1 = attachment.md5()
+ managedid0_1 = attachment.managedID()
+ pathID0_1 = ManagedAttachment.lastSegmentOfUriPath(managedid0_1, attachment.name())
+
+ attachment, _ignore_location = yield object0_2.addAttachment(None, MimeType.fromString("text/plain"), "test2.txt", MemoryStream("Here is some text #2."))
+ id0_2 = attachment.id()
+ md50_2 = attachment.md5()
+ managedid0_2 = attachment.managedID()
+ pathID0_2 = ManagedAttachment.lastSegmentOfUriPath(managedid0_2, attachment.name())
+
+ yield self.commitTransaction(0)
+
+ # Add original to a different resource
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ component = yield object1.componentForUser()
+ attach = component.mainComponent().getProperty("ATTACH")
+
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="3.ics")
+ component = yield object1.componentForUser()
+ attach = component.mainComponent().addProperty(attach)
+ yield object1.setComponent(component)
+ yield self.commitTransaction(0)
+
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+
+ # Trigger sync of the one calendar
+ local_sync_state = {}
+ remote_sync_state = yield syncer.getCalendarSyncList()
+ yield syncer.syncCalendar(
+ remote_id,
+ local_sync_state,
+ remote_sync_state,
+ )
+ self.assertEqual(len(local_sync_state), 1)
+ self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
+
+ # Sync attachments
+ changed, removed = yield syncer.syncAttachments()
+
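+        # Nested helper: maps a remote attachment id to its migrated local id via
+        # the AttachmentMigrationRecord entries on pod 1.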
+ @inlineCallbacks
+ def _mapLocalIDToRemote(remote_id):
+ records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
+ yield self.commitTransaction(1)
+ for record in records:
+ if record.remoteResourceID == remote_id:
+ returnValue(record.localResourceID)
+ else:
+ returnValue(None)
+
+ self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)), (yield _mapLocalIDToRemote(id0_2)),)))
+ self.assertEqual(removed, set())
+
+ # Link attachments (after home is disabled)
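+        # linkAttachments() attaches each migrated attachment to its calendar
+        # objects; three links are expected because 1.ics and 3.ics share one
+        # attachment and 2.ics has its own.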
+ yield syncer.disableRemoteHome()
+ len_links = yield syncer.linkAttachments()
+ self.assertEqual(len_links, 3)
+
+ # Local calendar exists
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ self.assertTrue(calendar1 is not None)
+ children = yield calendar1.objectResources()
+ self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
+
+ # Make sure calendar object is associated with attachment
+ object1 = yield calendar1.objectResourceWithName("1.ics")
+ attachments = yield object1.managedAttachmentList()
+ self.assertEqual(attachments, [pathID0_1, ])
+
+ attachment = yield object1.attachmentWithManagedID(managedid0_1)
+ self.assertTrue(attachment is not None)
+ self.assertEqual(attachment.md5(), md50_1)
+
+ # Make sure calendar object is associated with attachment
+ object1 = yield calendar1.objectResourceWithName("2.ics")
+ attachments = yield object1.managedAttachmentList()
+ self.assertEqual(attachments, [pathID0_2, ])
+
+ attachment = yield object1.attachmentWithManagedID(managedid0_2)
+ self.assertTrue(attachment is not None)
+ self.assertEqual(attachment.md5(), md50_2)
+
+ # Make sure calendar object is associated with attachment
+ object1 = yield calendar1.objectResourceWithName("3.ics")
+ attachments = yield object1.managedAttachmentList()
+ self.assertEqual(attachments, [pathID0_1, ])
+
+ attachment = yield object1.attachmentWithManagedID(managedid0_1)
+ self.assertTrue(attachment is not None)
+ self.assertEqual(attachment.md5(), md50_1)
+
+
+ @inlineCallbacks
+ def test_delegate_reconcile(self):
+ """
+        Test that L{delegateReconcile} copies over the full set of delegates and caches the associated groups.
+ """
+
+ # Create remote home
+ yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.commitTransaction(0)
+
+ # Add some delegates
+ txn = self.theTransactionUnderTest(0)
+ record01 = yield txn.directoryService().recordWithUID(u"user01")
+ record02 = yield txn.directoryService().recordWithUID(u"user02")
+ record03 = yield txn.directoryService().recordWithUID(u"user03")
+
+ group01 = yield txn.directoryService().recordWithUID(u"__top_group_1__")
+ group02 = yield txn.directoryService().recordWithUID(u"right_coast")
+
+ # Add user02 and user03 as individual delegates
+ yield Delegates.addDelegate(txn, record01, record02, True)
+ yield Delegates.addDelegate(txn, record01, record03, False)
+
+ # Add group delegates
+ yield Delegates.addDelegate(txn, record01, group01, True)
+ yield Delegates.addDelegate(txn, record01, group02, False)
+
+ # Add external delegates
+ yield txn.assignExternalDelegates(u"user01", None, None, u"external1", u"external2")
+
+ yield self.commitTransaction(0)
+
+
+ # Initially no local delegates
+ txn = self.theTransactionUnderTest(1)
+ delegates = yield txn.dumpIndividualDelegatesLocal(u"user01")
+ self.assertEqual(len(delegates), 0)
+ delegates = yield txn.dumpGroupDelegatesLocal(u"user04")
+ self.assertEqual(len(delegates), 0)
+ externals = yield txn.dumpExternalDelegatesLocal(u"user01")
+ self.assertEqual(len(externals), 0)
+ yield self.commitTransaction(1)
+
+ # Sync from remote side
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.disableRemoteHome()
+ yield syncer.delegateReconcile()
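+        # delegateReconcile() is expected to copy the individual, group and external
+        # delegate assignments for the migrating user onto this pod, which the
+        # queries below verify.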
+
+ # Now have local delegates
+ txn = self.theTransactionUnderTest(1)
+
+ delegates = yield txn.dumpIndividualDelegatesLocal(u"user01")
+ self.assertEqual(
+ set(delegates),
+ set((
+ DelegateRecord.make(delegator="user01", delegate="user02", readWrite=1),
+ DelegateRecord.make(delegator="user01", delegate="user03", readWrite=0),
+ )),
+ )
+
+ delegateGroups = yield txn.dumpGroupDelegatesLocal(u"user01")
+ group_top = yield txn.groupByUID(u"__top_group_1__")
+ group_right = yield txn.groupByUID(u"right_coast")
+ self.assertEqual(
+ set([item[0] for item in delegateGroups]),
+ set((
+ DelegateGroupsRecord.make(delegator="user01", groupID=group_top.groupID, readWrite=1, isExternal=False),
+ DelegateGroupsRecord.make(delegator="user01", groupID=group_right.groupID, readWrite=0, isExternal=False),
+ )),
+ )
+
+ externals = yield txn.dumpExternalDelegatesLocal(u"user01")
+ self.assertEqual(
+ set(externals),
+ set((
+ ExternalDelegateGroupsRecord.make(
+ delegator="user01",
+ groupUIDRead="external1",
+ groupUIDWrite="external2",
+ ),
+ )),
+ )
+
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_notifications_reconcile(self):
+ """
+        Test that L{notificationsReconcile} copies over the full set of notification objects.
+ """
+
+ # Create remote home - and add some fake notifications
+ yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ notifications = yield self.theTransactionUnderTest(0).notificationsWithUID("user01", create=True)
+ uid1 = str(uuid4())
+ obj1 = yield notifications.writeNotificationObject(uid1, "type1", "data1")
+ id1 = obj1.id()
+ uid2 = str(uuid4())
+ obj2 = yield notifications.writeNotificationObject(uid2, "type2", "data2")
+ id2 = obj2.id()
+ yield self.commitTransaction(0)
+
+ # Sync from remote side
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+ yield syncer.disableRemoteHome()
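+        # notificationsReconcile() copies each remote notification object into the
+        # migrating home; the copies get new resource ids but keep uid, type and data.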
+ changes = yield syncer.notificationsReconcile()
+ self.assertEqual(changes, 2)
+
+ # Now have local notifications
+ notifications = yield NotificationCollection.notificationsWithUID(
+ self.theTransactionUnderTest(1),
+ "user01",
+ status=_HOME_STATUS_MIGRATING,
+ )
+ results = yield notifications.notificationObjects()
+ self.assertEqual(len(results), 2)
+ for result in results:
+ for test_uid, test_id, test_type, test_data in ((uid1, id1, "type1", "data1",), (uid2, id2, "type2", "data2",),):
+ if result.uid() == test_uid:
+ self.assertNotEqual(result.id(), test_id)
+ self.assertEqual(json.loads(result.notificationType()), test_type)
+ data = yield result.notificationData()
+ self.assertEqual(json.loads(data), test_data)
+ break
+ else:
+ self.fail("Notification uid {} not found".format(result.uid()))
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_disable_remote_home(self):
+ """
+        Test that L{disableRemoteHome} marks the remote home as disabled so that it
+        can no longer be looked up as a normal home.
+ """
+
+        # Create remote home and notification collection
+ yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.theTransactionUnderTest(0).notificationsWithUID("user01", create=True)
+ yield self.commitTransaction(0)
+
+ # Sync from remote side
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.prepareCalendarHome()
+ yield syncer.disableRemoteHome()
+
+ # It is disabled
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01")
+ self.assertTrue(home is None)
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
+ self.assertTrue(home is None)
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(home is not None)
+ yield self.commitTransaction(0)
+
+
+
+class TestSharingSync(MultiStoreConduitTest):
+ """
+ Test that L{CrossPodHomeSync} sharing sync works.
+ """
+
+ @inlineCallbacks
+ def setUp(self):
+ self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
+ self.augments = FilePath(__file__).sibling("accounts").child("augments.xml")
+ yield super(TestSharingSync, self).setUp()
+ yield self.populate()
+
+
+ def configure(self):
+ super(TestSharingSync, self).configure()
+ config.Sharing.Enabled = True
+ config.Sharing.Calendars.Enabled = True
+ config.Sharing.Calendars.Groups.Enabled = True
+ config.Sharing.Calendars.Groups.ReconciliationDelaySeconds = 0
+
+
+ @inlineCallbacks
+ def populate(self):
+ yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
+
+ requirements = {
+ "user01" : None,
+ "user02" : None,
+ "user06" : None,
+ "user07" : None,
+ "user08" : None,
+ "user09" : None,
+ "user10" : None,
+ }
+
+
+ @inlineCallbacks
+ def _createShare(self, shareFrom, shareTo, accept=True):
+ # Invite
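+        # Users whose names start with "p" live on pod 1; pick the matching
+        # store/transaction index for the sharer and (below) the sharee.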
+ txnindex = 1 if shareFrom[0] == "p" else 0
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
+ calendar = yield home.childWithName("calendar")
+ shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, "summary")
+ yield self.commitTransaction(txnindex)
+
+ # Accept
+ if accept:
+ inviteUID = shareeView.shareUID()
+ txnindex = 1 if shareTo[0] == "p" else 0
+ shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
+ shareeView = yield shareeHome.acceptShare(inviteUID)
+ sharedName = shareeView.name()
+ yield self.commitTransaction(txnindex)
+ else:
+ sharedName = None
+
+ returnValue(sharedName)
+
+
+ @inlineCallbacks
+ def test_shared_collections_reconcile(self):
+ """
+        Test that L{sharedByCollectionsReconcile} and L{sharedToCollectionsReconcile}
+        copy over the full set of shared calendars in both directions.
+ """
+
+ # Create home
+ yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.commitTransaction(0)
+
+ # Shared by migrating user
+ shared_name_02 = yield self._createShare("user01", "user02")
+ shared_name_03 = yield self._createShare("user01", "puser03")
+
+ # Shared to migrating user
+ shared_name_04 = yield self._createShare("user04", "user01")
+ shared_name_05 = yield self._createShare("puser05", "user01")
+
+ # Sync from remote side
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.sync()
+ yield syncer.disableRemoteHome()
+ changes = yield syncer.sharedByCollectionsReconcile()
+ self.assertEqual(changes, 2)
+ changes = yield syncer.sharedToCollectionsReconcile()
+ self.assertEqual(changes, 2)
+
+ # Local calendar exists with shares
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ invites1 = yield calendar1.sharingInvites()
+ self.assertEqual(len(invites1), 2)
+ self.assertEqual(set([invite.uid for invite in invites1]), set((shared_name_02, shared_name_03,)))
+ yield self.commitTransaction(1)
+
+ # Remote sharee can access it
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user02")
+ calendar0 = yield home0.childWithName(shared_name_02)
+ self.assertTrue(calendar0 is not None)
+
+ # Local sharee can access it
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser03")
+ calendar1 = yield home1.childWithName(shared_name_03)
+ self.assertTrue(calendar1 is not None)
+
+ # Local shared calendars exist
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName(shared_name_04)
+ self.assertTrue(calendar1 is not None)
+ calendar1 = yield home1.childWithName(shared_name_05)
+ self.assertTrue(calendar1 is not None)
+ yield self.commitTransaction(1)
+
+ # Sharers see migrated user as sharee
+ externalHome0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
+ calendar0 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user04", name="calendar")
+ invites = yield calendar0.allInvitations()
+ self.assertEqual(len(invites), 1)
+ self.assertEqual(invites[0].shareeUID, "user01")
+ self.assertEqual(invites[0].shareeHomeID, externalHome0.id())
+ yield self.commitTransaction(0)
+
+ shareeHome1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser05", name="calendar")
+ invites = yield calendar1.allInvitations()
+ self.assertEqual(len(invites), 1)
+ self.assertEqual(invites[0].shareeUID, "user01")
+ self.assertEqual(invites[0].shareeHomeID, shareeHome1.id())
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_group_shared_collections_reconcile(self):
+ """
+        Test that L{sharedByCollectionsReconcile} correctly handles a calendar shared
+        to a group of sharees.
+ """
+
+ # Create home
+ yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.notificationCollectionUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ yield self.commitTransaction(0)
+
+ # Shared by migrating user
+ yield self._createShare("user01", "group02", accept=False)
+
+ # Sync from remote side
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.loadRecord()
+ yield syncer.sync()
+ yield syncer.disableRemoteHome()
+ changes = yield syncer.sharedByCollectionsReconcile()
+ self.assertEqual(changes, 3)
+ changes = yield syncer.sharedToCollectionsReconcile()
+ self.assertEqual(changes, 0)
+
+ # Local calendar exists with shares
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ invites1 = yield calendar1.sharingInvites()
+ self.assertEqual(len(invites1), 3)
+ sharee = yield GroupShareeRecord.querysimple(self.theTransactionUnderTest(1), calendarID=calendar1.id())
+ self.assertEqual(len(sharee), 1)
+ group = yield GroupsRecord.querysimple(self.theTransactionUnderTest(1), groupID=sharee[0].groupID)
+ self.assertEqual(len(group), 1)
+ self.assertEqual(group[0].groupUID, "group02")
+ yield self.commitTransaction(1)
+
+
+
+class TestGroupAttendeeSync(MultiStoreConduitTest):
+ """
+ GroupAttendeeReconciliation tests
+ """
+
+ now = {"now1": DateTime.getToday().getYear() + 1}
+
+ groupdata1 = """BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 1
+UID:event1@ninevah.local
+END:VEVENT
+END:VCALENDAR""".format(**now)
+
+ groupdata2 = """BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 2
+UID:event2@ninevah.local
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:group02@example.com
+END:VEVENT
+END:VCALENDAR""".format(**now)
+
+ groupdata3 = """BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 3
+UID:event3@ninevah.local
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:group04@example.com
+END:VEVENT
+END:VCALENDAR""".format(**now)
+
+ @inlineCallbacks
+ def setUp(self):
+ self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
+ yield super(TestGroupAttendeeSync, self).setUp()
+ yield self.populate()
+
+
+ def configure(self):
+ super(TestGroupAttendeeSync, self).configure()
+ config.GroupAttendees.Enabled = True
+ config.GroupAttendees.ReconciliationDelaySeconds = 0
+ config.GroupAttendees.AutoUpdateSecondsFromNow = 0
+
+
+ @inlineCallbacks
+ def populate(self):
+ yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
+
+ requirements = {
+ "user01" : None,
+ "user02" : None,
+ "user06" : None,
+ "user07" : None,
+ "user08" : None,
+ "user09" : None,
+ "user10" : None,
+ }
+
+ @inlineCallbacks
+ def test_group_attendees(self):
+ """
+        Test that L{groupAttendeeReconcile} links group attendees to their associated calendar objects.
+ """
+
+ home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar0 = yield home0.childWithName("calendar")
+ yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.groupdata1))
+ yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.groupdata2))
+ yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.groupdata3))
+ yield self.commitTransaction(0)
+
+ yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60.0)
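+        # Wait for the group-attendee reconciliation work queued on pod 0 to finish
+        # before starting the cross-pod sync.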
+
+ # Trigger sync
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.sync()
+
+ # Link groups
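+        # Only 2.ics and 3.ics reference group attendees, so two group links are
+        # expected after reconciliation.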
+ len_links = yield syncer.groupAttendeeReconcile()
+ self.assertEqual(len_links, 2)
+
+ # Local calendar exists
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ calendar1 = yield home1.childWithName("calendar")
+ self.assertTrue(calendar1 is not None)
+ children = yield calendar1.objectResources()
+ self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
+
+ object2 = yield calendar1.objectResourceWithName("2.ics")
+ record = (yield object2.groupEventLinks()).values()[0]
+ group02 = yield self.theTransactionUnderTest(1).groupByUID(u"group02")
+ self.assertEqual(record.groupID, group02.groupID)
+ self.assertEqual(record.membershipHash, group02.membershipHash)
+
+ object3 = yield calendar1.objectResourceWithName("3.ics")
+ record = (yield object3.groupEventLinks()).values()[0]
+ group04 = yield self.theTransactionUnderTest(1).groupByUID(u"group04")
+ self.assertEqual(record.groupID, group04.groupID)
+ self.assertEqual(record.membershipHash, group04.membershipHash)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpy"></a>
<div class="delfile"><h4>Deleted: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py (14520 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_migration.py        2015-03-06 21:20:35 UTC (rev 14520)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -1,693 +0,0 @@
</span><del>-##
-# Copyright (c) 2015 Apple Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-
-from pycalendar.datetime import DateTime
-from twisted.internet.defer import inlineCallbacks, returnValue
-from twisted.python.filepath import FilePath
-from twistedcaldav.config import config
-from twistedcaldav.ical import Component
-from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
-from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
-from txdav.common.datastore.sql_tables import _BIND_MODE_READ, \
- _HOME_STATUS_DISABLED, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
- _HOME_STATUS_MIGRATING
-from txdav.common.datastore.test.util import populateCalendarsFrom
-from txdav.who.delegates import Delegates
-from txweb2.http_headers import MimeType
-from txweb2.stream import MemoryStream
-from txdav.caldav.datastore.scheduling.ischedule.delivery import IScheduleRequest
-from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
-from txweb2.dav.test.util import SimpleRequest
-from txdav.caldav.datastore.test.common import CaptureProtocol
-
-
-class TestCompleteMigrationCycle(MultiStoreConduitTest):
- """
- Test that a full migration cycle using L{CrossPodHomeSync} works.
- """
-
- def __init__(self, methodName='runTest'):
- super(TestCompleteMigrationCycle, self).__init__(methodName)
- self.stash = {}
-
-
- @inlineCallbacks
- def setUp(self):
- @inlineCallbacks
- def _fakeSubmitRequest(iself, ssl, host, port, request):
- pod = (port - 8008) / 100
- inbox = IScheduleInboxResource(self.site.resource, self.theStoreUnderTest(pod), podding=True)
- response = yield inbox.http_POST(SimpleRequest(
- self.site,
- "POST",
- "http://{host}:{port}/podding".format(host=host, port=port),
- request.headers,
- request.stream.mem,
- ))
- returnValue(response)
-
-
- self.patch(IScheduleRequest, "_submitRequest", _fakeSubmitRequest)
- self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
- self.augments = FilePath(__file__).sibling("accounts").child("augments.xml")
- yield super(TestCompleteMigrationCycle, self).setUp()
- yield self.populate()
-
-
- def configure(self):
- super(TestCompleteMigrationCycle, self).configure()
- config.GroupAttendees.Enabled = True
- config.GroupAttendees.ReconciliationDelaySeconds = 0
- config.GroupAttendees.AutoUpdateSecondsFromNow = 0
- config.AccountingCategories.migration = True
- config.AccountingPrincipals = ["*"]
-
-
- @inlineCallbacks
- def populate(self):
- yield populateCalendarsFrom(self.requirements0, self.theStoreUnderTest(0))
- yield populateCalendarsFrom(self.requirements1, self.theStoreUnderTest(1))
-
- requirements0 = {
- "user01" : None,
- "user02" : None,
- "user03" : None,
- "user04" : None,
- "user05" : None,
- "user06" : None,
- "user07" : None,
- "user08" : None,
- "user09" : None,
- "user10" : None,
- }
-
- requirements1 = {
- "puser01" : None,
- "puser02" : None,
- "puser03" : None,
- "puser04" : None,
- "puser05" : None,
- "puser06" : None,
- "puser07" : None,
- "puser08" : None,
- "puser09" : None,
- "puser10" : None,
- }
-
-
- @inlineCallbacks
- def _createShare(self, shareFrom, shareTo, accept=True):
- # Invite
- txnindex = 1 if shareFrom[0] == "p" else 0
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
- calendar = yield home.childWithName("calendar")
- shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, "summary")
- yield self.commitTransaction(txnindex)
-
- # Accept
- if accept:
- inviteUID = shareeView.shareUID()
- txnindex = 1 if shareTo[0] == "p" else 0
- shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
- shareeView = yield shareeHome.acceptShare(inviteUID)
- sharedName = shareeView.name()
- yield self.commitTransaction(txnindex)
- else:
- sharedName = None
-
- returnValue(sharedName)
-
-
- def attachmentToString(self, attachment):
- """
- Convenience to convert an L{IAttachment} to a string.
-
- @param attachment: an L{IAttachment} provider to convert into a string.
-
- @return: a L{Deferred} that fires with the contents of the attachment.
-
- @rtype: L{Deferred} firing C{bytes}
- """
- capture = CaptureProtocol()
- attachment.retrieve(capture)
- return capture.deferred
-
-
- now = {
- "now": DateTime.getToday().getYear(),
- "now1": DateTime.getToday().getYear() + 1,
- }
-
- data01_1 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_1
-DTSTART:{now1:04d}0102T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:data01_1
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- data01_1_changed = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_1
-DTSTART:{now1:04d}0102T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:data01_1_changed
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- data01_2 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_2
-DTSTART:{now1:04d}0102T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data01_2
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:user02@example.com
-ATTENDEE:mailto:puser02@example.com
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- data01_3 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data01_3
-DTSTART:{now1:04d}0102T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data01_3
-ORGANIZER:mailto:user01@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:group02@example.com
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- data02_1 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data02_1
-DTSTART:{now1:04d}0103T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:data02_1
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- data02_2 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data02_2
-DTSTART:{now1:04d}0103T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data02_2
-ORGANIZER:mailto:user02@example.com
-ATTENDEE:mailto:user02@example.com
-ATTENDEE:mailto:user01@example.com
-ATTENDEE:mailto:puser02@example.com
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- data02_3 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_data02_3
-DTSTART:{now1:04d}0103T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:data02_3
-ORGANIZER:mailto:user02@example.com
-ATTENDEE:mailto:user02@example.com
-ATTENDEE:mailto:group01@example.com
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- datap02_1 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_datap02_1
-DTSTART:{now1:04d}0103T140000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-RRULE:FREQ=WEEKLY
-SUMMARY:datap02_1
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- datap02_2 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_datap02_2
-DTSTART:{now1:04d}0103T160000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:datap02_2
-ORGANIZER:mailto:puser02@example.com
-ATTENDEE:mailto:puser02@example.com
-ATTENDEE:mailto:user01@example.com
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
- datap02_3 = """BEGIN:VCALENDAR
-VERSION:2.0
-CALSCALE:GREGORIAN
-PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
-BEGIN:VEVENT
-UID:uid_datap02_3
-DTSTART:{now1:04d}0103T180000Z
-DURATION:PT1H
-CREATED:20060102T190000Z
-DTSTAMP:20051222T210507Z
-SUMMARY:datap02_3
-ORGANIZER:mailto:puser02@example.com
-ATTENDEE:mailto:puser02@example.com
-ATTENDEE:mailto:group01@example.com
-END:VEVENT
-END:VCALENDAR
-""".replace("\n", "\r\n").format(**now)
-
-
- @inlineCallbacks
- def preCheck(self):
- """
- Checks prior to starting any tests
- """
-
- for i in range(self.numberOfStores):
- txn = self.theTransactionUnderTest(i)
- record = yield txn.directoryService().recordWithUID(u"user01")
- self.assertEqual(record.serviceNodeUID, "A")
- self.assertEqual(record.thisServer(), i == 0)
- record = yield txn.directoryService().recordWithUID(u"user02")
- self.assertEqual(record.serviceNodeUID, "A")
- self.assertEqual(record.thisServer(), i == 0)
- record = yield txn.directoryService().recordWithUID(u"puser02")
- self.assertEqual(record.serviceNodeUID, "B")
- self.assertEqual(record.thisServer(), i == 1)
- yield self.commitTransaction(i)
-
-
- @inlineCallbacks
- def initialState(self):
- """
- Setup the server with an initial set of data
-
- user01 - migrating user
- user02 - has a calendar shared with user01
- user03 - shared to by user01
-
- puser01 - user on other pod
- puser02 - has a calendar shared with user01
- puser03 - shared to by user01
- """
-
- # Data for user01
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
- calendar = yield home.childWithName("calendar")
- yield calendar.createCalendarObjectWithName("01_1.ics", Component.fromString(self.data01_1))
- yield calendar.createCalendarObjectWithName("01_2.ics", Component.fromString(self.data01_2))
- obj3 = yield calendar.createCalendarObjectWithName("01_3.ics", Component.fromString(self.data01_3))
- attachment, _ignore_location = yield obj3.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1."))
- self.stash["user01_attachment_id"] = attachment.id()
- self.stash["user01_attachment_md5"] = attachment.md5()
- self.stash["user01_attachment_mid"] = attachment.managedID()
- yield self.commitTransaction(0)
-
- # Data for user02
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user02", create=True)
- calendar = yield home.childWithName("calendar")
- yield calendar.createCalendarObjectWithName("02_1.ics", Component.fromString(self.data02_1))
- yield calendar.createCalendarObjectWithName("02_2.ics", Component.fromString(self.data02_2))
- yield calendar.createCalendarObjectWithName("02_3.ics", Component.fromString(self.data02_3))
- yield self.commitTransaction(0)
-
- # Data for puser02
- home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser02", create=True)
- calendar = yield home.childWithName("calendar")
- yield calendar.createCalendarObjectWithName("p02_1.ics", Component.fromString(self.datap02_1))
- yield calendar.createCalendarObjectWithName("p02_2.ics", Component.fromString(self.datap02_2))
- yield calendar.createCalendarObjectWithName("p02_3.ics", Component.fromString(self.datap02_3))
- yield self.commitTransaction(1)
-
- # Share calendars
- self.stash["sharename_user01_to_user03"] = yield self._createShare("user01", "user03")
- self.stash["sharename_user01_to_puser03"] = yield self._createShare("user01", "puser03")
- self.stash["sharename_user02_to_user01"] = yield self._createShare("user02", "user01")
- self.stash["sharename_puser02_to_user01"] = yield self._createShare("puser02", "user01")
-
- # Add some delegates
- txn = self.theTransactionUnderTest(0)
- record01 = yield txn.directoryService().recordWithUID(u"user01")
- record02 = yield txn.directoryService().recordWithUID(u"user02")
- record03 = yield txn.directoryService().recordWithUID(u"user03")
- precord01 = yield txn.directoryService().recordWithUID(u"puser01")
-
- group02 = yield txn.directoryService().recordWithUID(u"group02")
- group03 = yield txn.directoryService().recordWithUID(u"group03")
-
- # Add user02 and user03 as individual delegates
- yield Delegates.addDelegate(txn, record01, record02, True)
- yield Delegates.addDelegate(txn, record01, record03, False)
- yield Delegates.addDelegate(txn, record01, precord01, False)
-
- # Add group delegates
- yield Delegates.addDelegate(txn, record01, group02, True)
- yield Delegates.addDelegate(txn, record01, group03, False)
-
- # Add external delegates
- yield txn.assignExternalDelegates(u"user01", None, None, u"external1", u"external2")
-
- yield self.commitTransaction(0)
-
- yield self.waitAllEmpty()
-
-
- @inlineCallbacks
- def secondState(self):
- """
- Setup the server with data changes appearing after the first sync
- """
- txn = self.theTransactionUnderTest(0)
- obj = yield self.calendarObjectUnderTest(txn, name="01_1.ics", calendar_name="calendar", home="user01")
- yield obj.setComponent(self.data01_1_changed)
-
- obj = yield self.calendarObjectUnderTest(txn, name="02_2.ics", calendar_name="calendar", home="user02")
- attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString("text/plain"), "test_02.txt", MemoryStream("Here is some text #02."))
- self.stash["user02_attachment_id"] = attachment.id()
- self.stash["user02_attachment_md5"] = attachment.md5()
- self.stash["user02_attachment_mid"] = attachment.managedID()
-
- yield self.commitTransaction(0)
-
- yield self.waitAllEmpty()
-
-
- @inlineCallbacks
- def finalState(self):
- """
- Setup the server with data changes appearing before the final sync
- """
- txn = self.theTransactionUnderTest(1)
- obj = yield self.calendarObjectUnderTest(txn, name="p02_2.ics", calendar_name="calendar", home="puser02")
- attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString("text/plain"), "test_p02.txt", MemoryStream("Here is some text #p02."))
- self.stash["puser02_attachment_id"] = attachment.id()
- self.stash["puser02_attachment_mid"] = attachment.managedID()
- self.stash["puser02_attachment_md5"] = attachment.md5()
-
- yield self.commitTransaction(1)
-
- yield self.waitAllEmpty()
-
-
- @inlineCallbacks
- def switchAccounts(self):
- """
- Switch the migrated user accounts to point to the new pod
- """
-
- for i in range(self.numberOfStores):
- txn = self.theTransactionUnderTest(i)
- record = yield txn.directoryService().recordWithUID(u"user01")
- yield self.changeRecord(record, txn.directoryService().fieldName.serviceNodeUID, u"B", directory=txn.directoryService())
- yield self.commitTransaction(i)
-
- for i in range(self.numberOfStores):
- txn = self.theTransactionUnderTest(i)
- record = yield txn.directoryService().recordWithUID(u"user01")
- self.assertEqual(record.serviceNodeUID, "B")
- self.assertEqual(record.thisServer(), i == 1)
- record = yield txn.directoryService().recordWithUID(u"user02")
- self.assertEqual(record.serviceNodeUID, "A")
- self.assertEqual(record.thisServer(), i == 0)
- record = yield txn.directoryService().recordWithUID(u"puser02")
- self.assertEqual(record.serviceNodeUID, "B")
- self.assertEqual(record.thisServer(), i == 1)
- yield self.commitTransaction(i)
-
-
- @inlineCallbacks
- def postCheck(self):
- """
- Checks after migration is done
- """
-
- # Check that the home has been moved
- home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01")
- self.assertTrue(home.external())
- home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
- self.assertTrue(home is None)
- home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
- self.assertTrue(home is not None)
- home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
- self.assertTrue(home is not None)
- home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home is None)
- yield self.commitTransaction(0)
-
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01")
- self.assertTrue(home.normal())
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_NORMAL)
- self.assertTrue(home is not None)
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_EXTERNAL)
- self.assertTrue(home is None)
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_DISABLED)
- self.assertTrue(home is not None)
- home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
- self.assertTrue(home is None)
- yield self.commitTransaction(1)
-
- # Check that the notifications have been moved
- notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
- self.assertTrue(notifications is None)
- notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
- self.assertTrue(notifications is None)
- notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
- self.assertTrue(notifications is not None)
- yield self.commitTransaction(0)
-
- notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_NORMAL)
- self.assertTrue(notifications is not None)
- notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_EXTERNAL)
- self.assertTrue(notifications is None)
- notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_DISABLED)
- self.assertTrue(notifications is not None)
- yield self.commitTransaction(1)
-
- # New pod data
- homes = {}
- homes["user01"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01")
- homes["user02"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user02")
- self.assertTrue(homes["user02"].external())
- homes["user03"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user03")
- self.assertTrue(homes["user03"].external())
- homes["puser01"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser01")
- self.assertTrue(homes["puser01"].normal())
- homes["puser02"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser02")
- self.assertTrue(homes["puser02"].normal())
- homes["puser03"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser03")
- self.assertTrue(homes["puser03"].normal())
-
- # Check calendar data on new pod
- calendars = yield homes["user01"].loadChildren()
- calnames = dict([(calendar.name(), calendar) for calendar in calendars])
- self.assertEqual(
- set(calnames.keys()),
- set(("calendar", "tasks", "inbox", self.stash["sharename_user02_to_user01"], self.stash["sharename_puser02_to_user01"],))
- )
-
- # Check shared-by user01 on new pod
- shared = calnames["calendar"]
- invitations = yield shared.sharingInvites()
- by_sharee = dict([(invitation.shareeUID, invitation) for invitation in invitations])
- self.assertEqual(len(invitations), 2)
- self.assertEqual(set(by_sharee.keys()), set(("user03", "puser03",)))
- self.assertEqual(by_sharee["user03"].shareeHomeID, homes["user03"].id())
- self.assertEqual(by_sharee["puser03"].shareeHomeID, homes["puser03"].id())
-
- # Check shared-to user01 on new pod
- shared = calnames[self.stash["sharename_user02_to_user01"]]
- self.assertEqual(shared.ownerHome().uid(), "user02")
- self.assertEqual(shared.ownerHome().id(), homes["user02"].id())
-
- shared = calnames[self.stash["sharename_puser02_to_user01"]]
- self.assertEqual(shared.ownerHome().uid(), "puser02")
- self.assertEqual(shared.ownerHome().id(), homes["puser02"].id())
-
- shared = yield homes["puser02"].calendarWithName("calendar")
- invitations = yield shared.sharingInvites()
- self.assertEqual(len(invitations), 1)
- self.assertEqual(invitations[0].shareeHomeID, homes["user01"].id())
-
- yield self.commitTransaction(1)
-
- # Old pod data
- homes = {}
- homes["user01"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01")
- homes["user02"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user02")
- self.assertTrue(homes["user02"].normal())
- homes["user03"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user03")
- self.assertTrue(homes["user03"].normal())
- homes["puser01"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser01")
- self.assertTrue(homes["puser01"] is None)
- homes["puser02"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser02")
- self.assertTrue(homes["puser02"].external())
- homes["puser03"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser03")
- self.assertTrue(homes["puser03"].external())
-
- # Check shared-by user01 on old pod
- shared = yield homes["user03"].calendarWithName(self.stash["sharename_user01_to_user03"])
- self.assertEqual(shared.ownerHome().uid(), "user01")
- self.assertEqual(shared.ownerHome().id(), homes["user01"].id())
-
- # Check shared-to user01 on old pod
- shared = yield homes["user02"].calendarWithName("calendar")
- invitations = yield shared.sharingInvites()
- self.assertEqual(len(invitations), 1)
- self.assertEqual(invitations[0].shareeHomeID, homes["user01"].id())
-
- yield self.commitTransaction(0)
-
- # Delegates on each pod
- for pod in range(self.numberOfStores):
- txn = self.theTransactionUnderTest(pod)
- records = {}
- for ctr in range(10):
- uid = u"user{:02d}".format(ctr + 1)
- records[uid] = yield txn.directoryService().recordWithUID(uid)
- for ctr in range(10):
- uid = u"puser{:02d}".format(ctr + 1)
- records[uid] = yield txn.directoryService().recordWithUID(uid)
- for ctr in range(10):
- uid = u"group{:02d}".format(ctr + 1)
- records[uid] = yield txn.directoryService().recordWithUID(uid)
-
- delegates = yield Delegates.delegatesOf(txn, records["user01"], True, False)
- self.assertTrue(records["user02"] in delegates)
- self.assertTrue(records["group02"] in delegates)
- delegates = yield Delegates.delegatesOf(txn, records["user01"], True, True)
- self.assertTrue(records["user02"] in delegates)
- self.assertTrue(records["user06"] in delegates)
- self.assertTrue(records["user07"] in delegates)
- self.assertTrue(records["user08"] in delegates)
-
- delegates = yield Delegates.delegatesOf(txn, records["user01"], False, False)
- self.assertTrue(records["user03"] in delegates)
- self.assertTrue(records["group03"] in delegates)
- self.assertTrue(records["puser01"] in delegates)
- delegates = yield Delegates.delegatesOf(txn, records["user01"], False, True)
- self.assertTrue(records["user03"] in delegates)
- self.assertTrue(records["user07"] in delegates)
- self.assertTrue(records["user08"] in delegates)
- self.assertTrue(records["user09"] in delegates)
- self.assertTrue(records["puser01"] in delegates)
-
- # Attachments
- obj = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), name="01_3.ics", calendar_name="calendar", home="user01")
- attachment = yield obj.attachmentWithManagedID(self.stash["user01_attachment_mid"])
- self.assertTrue(attachment is not None)
- self.assertEqual(attachment.md5(), self.stash["user01_attachment_md5"])
- data = yield self.attachmentToString(attachment)
- self.assertEqual(data, "Here is some text #1.")
-
-
- @inlineCallbacks
- def test_migration(self):
- """
- Full migration cycle.
- """
-
- yield self.preCheck()
-
- # Step 1. Live full sync
- yield self.initialState()
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.sync()
-
- # Step 2. Live incremental sync
- yield self.secondState()
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.sync()
-
- # Step 3. Disable home after final changes
- yield self.finalState()
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
- yield syncer.disableRemoteHome()
-
- # Step 4. Final incremental sync
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
- yield syncer.sync()
-
- # Step 5. Final reconcile sync
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
- yield syncer.finalSync()
-
- # Step 6. Enable new home
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
- yield syncer.enableLocalHome()
-
- # Step 7. Remove old home
- syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
- yield syncer.removeRemoteHome()
-
- yield self.switchAccounts()
-
- yield self.postCheck()
</del></span></pre></div>
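<p>The postCheck() assertions above query delegate assignments on both pods with the same four flag combinations: read-write versus read-only, and direct versus expanded (group-flattened) membership. A minimal sketch of that query pattern, assuming only the Delegates.delegatesOf(txn, delegator, readWrite, expanded) call used by the test:</p>
<pre>
# Sketch only: summarises the delegate checks in postCheck(), assuming the
# Delegates.delegatesOf(txn, delegator, readWrite, expanded) call used by the test.
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.who.delegates import Delegates

@inlineCallbacks
def delegateSummary(txn, delegator):
    """
    Map each (read-write, expanded) flag combination to the set of delegate
    UIDs, where "expanded" flattens group delegates into their members.
    """
    summary = {}
    for readWrite in (True, False):
        for expanded in (False, True):
            records = yield Delegates.delegatesOf(txn, delegator, readWrite, expanded)
            key = "{}-{}".format(
                "write" if readWrite else "read",
                "expanded" if expanded else "direct",
            )
            summary[key] = set(record.uid for record in records)
    returnValue(summary)
</pre>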
<a id="CalendarServertrunktxdavcommondatastorepoddingmigrationtesttest_migrationpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingmigrationtesttest_migrationpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_migration.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,693 @@
</span><ins>+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from pycalendar.datetime import DateTime
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.filepath import FilePath
+from twistedcaldav.config import config
+from twistedcaldav.ical import Component
+from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
+from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_tables import _BIND_MODE_READ, \
+ _HOME_STATUS_DISABLED, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
+ _HOME_STATUS_MIGRATING
+from txdav.common.datastore.test.util import populateCalendarsFrom
+from txdav.who.delegates import Delegates
+from txweb2.http_headers import MimeType
+from txweb2.stream import MemoryStream
+from txdav.caldav.datastore.scheduling.ischedule.delivery import IScheduleRequest
+from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
+from txweb2.dav.test.util import SimpleRequest
+from txdav.caldav.datastore.test.common import CaptureProtocol
+
+
+class TestCompleteMigrationCycle(MultiStoreConduitTest):
+ """
+ Test that a full migration cycle using L{CrossPodHomeSync} works.
+ """
+
+ def __init__(self, methodName='runTest'):
+ super(TestCompleteMigrationCycle, self).__init__(methodName)
+ self.stash = {}
+
+
+ @inlineCallbacks
+ def setUp(self):
+ @inlineCallbacks
+ def _fakeSubmitRequest(iself, ssl, host, port, request):
+ pod = (port - 8008) / 100
+ inbox = IScheduleInboxResource(self.site.resource, self.theStoreUnderTest(pod), podding=True)
+ response = yield inbox.http_POST(SimpleRequest(
+ self.site,
+ "POST",
+ "http://{host}:{port}/podding".format(host=host, port=port),
+ request.headers,
+ request.stream.mem,
+ ))
+ returnValue(response)
+
+
+ self.patch(IScheduleRequest, "_submitRequest", _fakeSubmitRequest)
+ self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
+ self.augments = FilePath(__file__).sibling("accounts").child("augments.xml")
+ yield super(TestCompleteMigrationCycle, self).setUp()
+ yield self.populate()
+
+
+ def configure(self):
+ super(TestCompleteMigrationCycle, self).configure()
+ config.GroupAttendees.Enabled = True
+ config.GroupAttendees.ReconciliationDelaySeconds = 0
+ config.GroupAttendees.AutoUpdateSecondsFromNow = 0
+ config.AccountingCategories.migration = True
+ config.AccountingPrincipals = ["*"]
+
+
+ @inlineCallbacks
+ def populate(self):
+ yield populateCalendarsFrom(self.requirements0, self.theStoreUnderTest(0))
+ yield populateCalendarsFrom(self.requirements1, self.theStoreUnderTest(1))
+
+ requirements0 = {
+ "user01" : None,
+ "user02" : None,
+ "user03" : None,
+ "user04" : None,
+ "user05" : None,
+ "user06" : None,
+ "user07" : None,
+ "user08" : None,
+ "user09" : None,
+ "user10" : None,
+ }
+
+ requirements1 = {
+ "puser01" : None,
+ "puser02" : None,
+ "puser03" : None,
+ "puser04" : None,
+ "puser05" : None,
+ "puser06" : None,
+ "puser07" : None,
+ "puser08" : None,
+ "puser09" : None,
+ "puser10" : None,
+ }
+
+
+ @inlineCallbacks
+ def _createShare(self, shareFrom, shareTo, accept=True):
+ # Invite
+ txnindex = 1 if shareFrom[0] == "p" else 0
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareFrom, create=True)
+ calendar = yield home.childWithName("calendar")
+ shareeView = yield calendar.inviteUIDToShare(shareTo, _BIND_MODE_READ, "summary")
+ yield self.commitTransaction(txnindex)
+
+ # Accept
+ if accept:
+ inviteUID = shareeView.shareUID()
+ txnindex = 1 if shareTo[0] == "p" else 0
+ shareeHome = yield self.homeUnderTest(txn=self.theTransactionUnderTest(txnindex), name=shareTo)
+ shareeView = yield shareeHome.acceptShare(inviteUID)
+ sharedName = shareeView.name()
+ yield self.commitTransaction(txnindex)
+ else:
+ sharedName = None
+
+ returnValue(sharedName)
+
+
+ def attachmentToString(self, attachment):
+ """
+ Convenience to convert an L{IAttachment} to a string.
+
+ @param attachment: an L{IAttachment} provider to convert into a string.
+
+ @return: a L{Deferred} that fires with the contents of the attachment.
+
+ @rtype: L{Deferred} firing C{bytes}
+ """
+ capture = CaptureProtocol()
+ attachment.retrieve(capture)
+ return capture.deferred
+
+
+ now = {
+ "now": DateTime.getToday().getYear(),
+ "now1": DateTime.getToday().getYear() + 1,
+ }
+
+ data01_1 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_1
+DTSTART:{now1:04d}0102T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:data01_1
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ data01_1_changed = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_1
+DTSTART:{now1:04d}0102T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:data01_1_changed
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ data01_2 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_2
+DTSTART:{now1:04d}0102T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data01_2
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:user02@example.com
+ATTENDEE:mailto:puser02@example.com
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ data01_3 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data01_3
+DTSTART:{now1:04d}0102T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data01_3
+ORGANIZER:mailto:user01@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:group02@example.com
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ data02_1 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data02_1
+DTSTART:{now1:04d}0103T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:data02_1
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ data02_2 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data02_2
+DTSTART:{now1:04d}0103T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data02_2
+ORGANIZER:mailto:user02@example.com
+ATTENDEE:mailto:user02@example.com
+ATTENDEE:mailto:user01@example.com
+ATTENDEE:mailto:puser02@example.com
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ data02_3 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_data02_3
+DTSTART:{now1:04d}0103T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:data02_3
+ORGANIZER:mailto:user02@example.com
+ATTENDEE:mailto:user02@example.com
+ATTENDEE:mailto:group01@example.com
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ datap02_1 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_datap02_1
+DTSTART:{now1:04d}0103T140000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+RRULE:FREQ=WEEKLY
+SUMMARY:datap02_1
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ datap02_2 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_datap02_2
+DTSTART:{now1:04d}0103T160000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:datap02_2
+ORGANIZER:mailto:puser02@example.com
+ATTENDEE:mailto:puser02@example.com
+ATTENDEE:mailto:user01@example.com
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+ datap02_3 = """BEGIN:VCALENDAR
+VERSION:2.0
+CALSCALE:GREGORIAN
+PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
+BEGIN:VEVENT
+UID:uid_datap02_3
+DTSTART:{now1:04d}0103T180000Z
+DURATION:PT1H
+CREATED:20060102T190000Z
+DTSTAMP:20051222T210507Z
+SUMMARY:datap02_3
+ORGANIZER:mailto:puser02@example.com
+ATTENDEE:mailto:puser02@example.com
+ATTENDEE:mailto:group01@example.com
+END:VEVENT
+END:VCALENDAR
+""".replace("\n", "\r\n").format(**now)
+
+
+ @inlineCallbacks
+ def preCheck(self):
+ """
+ Checks prior to starting any tests
+ """
+
+ for i in range(self.numberOfStores):
+ txn = self.theTransactionUnderTest(i)
+ record = yield txn.directoryService().recordWithUID(u"user01")
+ self.assertEqual(record.serviceNodeUID, "A")
+ self.assertEqual(record.thisServer(), i == 0)
+ record = yield txn.directoryService().recordWithUID(u"user02")
+ self.assertEqual(record.serviceNodeUID, "A")
+ self.assertEqual(record.thisServer(), i == 0)
+ record = yield txn.directoryService().recordWithUID(u"puser02")
+ self.assertEqual(record.serviceNodeUID, "B")
+ self.assertEqual(record.thisServer(), i == 1)
+ yield self.commitTransaction(i)
+
+
+ @inlineCallbacks
+ def initialState(self):
+ """
+ Set up the server with an initial set of data
+
+ user01 - migrating user
+ user02 - has a calendar shared with user01
+ user03 - shared to by user01
+
+ puser01 - user on other pod
+ puser02 - has a calendar shared with user01
+ puser03 - shared to by user01
+ """
+
+ # Data for user01
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ calendar = yield home.childWithName("calendar")
+ yield calendar.createCalendarObjectWithName("01_1.ics", Component.fromString(self.data01_1))
+ yield calendar.createCalendarObjectWithName("01_2.ics", Component.fromString(self.data01_2))
+ obj3 = yield calendar.createCalendarObjectWithName("01_3.ics", Component.fromString(self.data01_3))
+ attachment, _ignore_location = yield obj3.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text #1."))
+ self.stash["user01_attachment_id"] = attachment.id()
+ self.stash["user01_attachment_md5"] = attachment.md5()
+ self.stash["user01_attachment_mid"] = attachment.managedID()
+ yield self.commitTransaction(0)
+
+ # Data for user02
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user02", create=True)
+ calendar = yield home.childWithName("calendar")
+ yield calendar.createCalendarObjectWithName("02_1.ics", Component.fromString(self.data02_1))
+ yield calendar.createCalendarObjectWithName("02_2.ics", Component.fromString(self.data02_2))
+ yield calendar.createCalendarObjectWithName("02_3.ics", Component.fromString(self.data02_3))
+ yield self.commitTransaction(0)
+
+ # Data for puser02
+ home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser02", create=True)
+ calendar = yield home.childWithName("calendar")
+ yield calendar.createCalendarObjectWithName("p02_1.ics", Component.fromString(self.datap02_1))
+ yield calendar.createCalendarObjectWithName("p02_2.ics", Component.fromString(self.datap02_2))
+ yield calendar.createCalendarObjectWithName("p02_3.ics", Component.fromString(self.datap02_3))
+ yield self.commitTransaction(1)
+
+ # Share calendars
+ self.stash["sharename_user01_to_user03"] = yield self._createShare("user01", "user03")
+ self.stash["sharename_user01_to_puser03"] = yield self._createShare("user01", "puser03")
+ self.stash["sharename_user02_to_user01"] = yield self._createShare("user02", "user01")
+ self.stash["sharename_puser02_to_user01"] = yield self._createShare("puser02", "user01")
+
+ # Add some delegates
+ txn = self.theTransactionUnderTest(0)
+ record01 = yield txn.directoryService().recordWithUID(u"user01")
+ record02 = yield txn.directoryService().recordWithUID(u"user02")
+ record03 = yield txn.directoryService().recordWithUID(u"user03")
+ precord01 = yield txn.directoryService().recordWithUID(u"puser01")
+
+ group02 = yield txn.directoryService().recordWithUID(u"group02")
+ group03 = yield txn.directoryService().recordWithUID(u"group03")
+
+ # Add user02 and user03 as individual delegates
+ yield Delegates.addDelegate(txn, record01, record02, True)
+ yield Delegates.addDelegate(txn, record01, record03, False)
+ yield Delegates.addDelegate(txn, record01, precord01, False)
+
+ # Add group delegates
+ yield Delegates.addDelegate(txn, record01, group02, True)
+ yield Delegates.addDelegate(txn, record01, group03, False)
+
+ # Add external delegates
+ yield txn.assignExternalDelegates(u"user01", None, None, u"external1", u"external2")
+
+ yield self.commitTransaction(0)
+
+ yield self.waitAllEmpty()
+
+
+ @inlineCallbacks
+ def secondState(self):
+ """
+ Set up the server with data changes appearing after the first sync
+ """
+ txn = self.theTransactionUnderTest(0)
+ obj = yield self.calendarObjectUnderTest(txn, name="01_1.ics", calendar_name="calendar", home="user01")
+ yield obj.setComponent(self.data01_1_changed)
+
+ obj = yield self.calendarObjectUnderTest(txn, name="02_2.ics", calendar_name="calendar", home="user02")
+ attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString("text/plain"), "test_02.txt", MemoryStream("Here is some text #02."))
+ self.stash["user02_attachment_id"] = attachment.id()
+ self.stash["user02_attachment_md5"] = attachment.md5()
+ self.stash["user02_attachment_mid"] = attachment.managedID()
+
+ yield self.commitTransaction(0)
+
+ yield self.waitAllEmpty()
+
+
+ @inlineCallbacks
+ def finalState(self):
+ """
+ Set up the server with data changes appearing before the final sync
+ """
+ txn = self.theTransactionUnderTest(1)
+ obj = yield self.calendarObjectUnderTest(txn, name="p02_2.ics", calendar_name="calendar", home="puser02")
+ attachment, _ignore_location = yield obj.addAttachment(None, MimeType.fromString("text/plain"), "test_p02.txt", MemoryStream("Here is some text #p02."))
+ self.stash["puser02_attachment_id"] = attachment.id()
+ self.stash["puser02_attachment_mid"] = attachment.managedID()
+ self.stash["puser02_attachment_md5"] = attachment.md5()
+
+ yield self.commitTransaction(1)
+
+ yield self.waitAllEmpty()
+
+
+ @inlineCallbacks
+ def switchAccounts(self):
+ """
+ Switch the migrated user accounts to point to the new pod
+ """
+
+ for i in range(self.numberOfStores):
+ txn = self.theTransactionUnderTest(i)
+ record = yield txn.directoryService().recordWithUID(u"user01")
+ yield self.changeRecord(record, txn.directoryService().fieldName.serviceNodeUID, u"B", directory=txn.directoryService())
+ yield self.commitTransaction(i)
+
+ for i in range(self.numberOfStores):
+ txn = self.theTransactionUnderTest(i)
+ record = yield txn.directoryService().recordWithUID(u"user01")
+ self.assertEqual(record.serviceNodeUID, "B")
+ self.assertEqual(record.thisServer(), i == 1)
+ record = yield txn.directoryService().recordWithUID(u"user02")
+ self.assertEqual(record.serviceNodeUID, "A")
+ self.assertEqual(record.thisServer(), i == 0)
+ record = yield txn.directoryService().recordWithUID(u"puser02")
+ self.assertEqual(record.serviceNodeUID, "B")
+ self.assertEqual(record.thisServer(), i == 1)
+ yield self.commitTransaction(i)
+
+
+ @inlineCallbacks
+ def postCheck(self):
+ """
+ Checks after migration is done
+ """
+
+ # Check that the home has been moved
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01")
+ self.assertTrue(home.external())
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
+ self.assertTrue(home is None)
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
+ self.assertTrue(home is not None)
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(home is not None)
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home is None)
+ yield self.commitTransaction(0)
+
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01")
+ self.assertTrue(home.normal())
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_NORMAL)
+ self.assertTrue(home is not None)
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_EXTERNAL)
+ self.assertTrue(home is None)
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(home is not None)
+ home = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_MIGRATING)
+ self.assertTrue(home is None)
+ yield self.commitTransaction(1)
+
+ # Check that the notifications have been moved
+ notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_NORMAL)
+ self.assertTrue(notifications is None)
+ notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_EXTERNAL)
+ self.assertTrue(notifications is None)
+ notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(0), name="user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(notifications is not None)
+ yield self.commitTransaction(0)
+
+ notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_NORMAL)
+ self.assertTrue(notifications is not None)
+ notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_EXTERNAL)
+ self.assertTrue(notifications is None)
+ notifications = yield self.notificationCollectionUnderTest(self.theTransactionUnderTest(1), name="user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(notifications is not None)
+ yield self.commitTransaction(1)
+
+ # New pod data
+ homes = {}
+ homes["user01"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user01")
+ homes["user02"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user02")
+ self.assertTrue(homes["user02"].external())
+ homes["user03"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="user03")
+ self.assertTrue(homes["user03"].external())
+ homes["puser01"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser01")
+ self.assertTrue(homes["puser01"].normal())
+ homes["puser02"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser02")
+ self.assertTrue(homes["puser02"].normal())
+ homes["puser03"] = yield self.homeUnderTest(self.theTransactionUnderTest(1), name="puser03")
+ self.assertTrue(homes["puser03"].normal())
+
+ # Check calendar data on new pod
+ calendars = yield homes["user01"].loadChildren()
+ calnames = dict([(calendar.name(), calendar) for calendar in calendars])
+ self.assertEqual(
+ set(calnames.keys()),
+ set(("calendar", "tasks", "inbox", self.stash["sharename_user02_to_user01"], self.stash["sharename_puser02_to_user01"],))
+ )
+
+ # Check shared-by user01 on new pod
+ shared = calnames["calendar"]
+ invitations = yield shared.sharingInvites()
+ by_sharee = dict([(invitation.shareeUID, invitation) for invitation in invitations])
+ self.assertEqual(len(invitations), 2)
+ self.assertEqual(set(by_sharee.keys()), set(("user03", "puser03",)))
+ self.assertEqual(by_sharee["user03"].shareeHomeID, homes["user03"].id())
+ self.assertEqual(by_sharee["puser03"].shareeHomeID, homes["puser03"].id())
+
+ # Check shared-to user01 on new pod
+ shared = calnames[self.stash["sharename_user02_to_user01"]]
+ self.assertEqual(shared.ownerHome().uid(), "user02")
+ self.assertEqual(shared.ownerHome().id(), homes["user02"].id())
+
+ shared = calnames[self.stash["sharename_puser02_to_user01"]]
+ self.assertEqual(shared.ownerHome().uid(), "puser02")
+ self.assertEqual(shared.ownerHome().id(), homes["puser02"].id())
+
+ shared = yield homes["puser02"].calendarWithName("calendar")
+ invitations = yield shared.sharingInvites()
+ self.assertEqual(len(invitations), 1)
+ self.assertEqual(invitations[0].shareeHomeID, homes["user01"].id())
+
+ yield self.commitTransaction(1)
+
+ # Old pod data
+ homes = {}
+ homes["user01"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user01")
+ homes["user02"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user02")
+ self.assertTrue(homes["user02"].normal())
+ homes["user03"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="user03")
+ self.assertTrue(homes["user03"].normal())
+ homes["puser01"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser01")
+ self.assertTrue(homes["puser01"] is None)
+ homes["puser02"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser02")
+ self.assertTrue(homes["puser02"].external())
+ homes["puser03"] = yield self.homeUnderTest(self.theTransactionUnderTest(0), name="puser03")
+ self.assertTrue(homes["puser03"].external())
+
+ # Check shared-by user01 on old pod
+ shared = yield homes["user03"].calendarWithName(self.stash["sharename_user01_to_user03"])
+ self.assertEqual(shared.ownerHome().uid(), "user01")
+ self.assertEqual(shared.ownerHome().id(), homes["user01"].id())
+
+ # Check shared-to user01 on old pod
+ shared = yield homes["user02"].calendarWithName("calendar")
+ invitations = yield shared.sharingInvites()
+ self.assertEqual(len(invitations), 1)
+ self.assertEqual(invitations[0].shareeHomeID, homes["user01"].id())
+
+ yield self.commitTransaction(0)
+
+ # Delegates on each pod
+ for pod in range(self.numberOfStores):
+ txn = self.theTransactionUnderTest(pod)
+ records = {}
+ for ctr in range(10):
+ uid = u"user{:02d}".format(ctr + 1)
+ records[uid] = yield txn.directoryService().recordWithUID(uid)
+ for ctr in range(10):
+ uid = u"puser{:02d}".format(ctr + 1)
+ records[uid] = yield txn.directoryService().recordWithUID(uid)
+ for ctr in range(10):
+ uid = u"group{:02d}".format(ctr + 1)
+ records[uid] = yield txn.directoryService().recordWithUID(uid)
+
+ delegates = yield Delegates.delegatesOf(txn, records["user01"], True, False)
+ self.assertTrue(records["user02"] in delegates)
+ self.assertTrue(records["group02"] in delegates)
+ delegates = yield Delegates.delegatesOf(txn, records["user01"], True, True)
+ self.assertTrue(records["user02"] in delegates)
+ self.assertTrue(records["user06"] in delegates)
+ self.assertTrue(records["user07"] in delegates)
+ self.assertTrue(records["user08"] in delegates)
+
+ delegates = yield Delegates.delegatesOf(txn, records["user01"], False, False)
+ self.assertTrue(records["user03"] in delegates)
+ self.assertTrue(records["group03"] in delegates)
+ self.assertTrue(records["puser01"] in delegates)
+ delegates = yield Delegates.delegatesOf(txn, records["user01"], False, True)
+ self.assertTrue(records["user03"] in delegates)
+ self.assertTrue(records["user07"] in delegates)
+ self.assertTrue(records["user08"] in delegates)
+ self.assertTrue(records["user09"] in delegates)
+ self.assertTrue(records["puser01"] in delegates)
+
+ # Attachments
+ obj = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), name="01_3.ics", calendar_name="calendar", home="user01")
+ attachment = yield obj.attachmentWithManagedID(self.stash["user01_attachment_mid"])
+ self.assertTrue(attachment is not None)
+ self.assertEqual(attachment.md5(), self.stash["user01_attachment_md5"])
+ data = yield self.attachmentToString(attachment)
+ self.assertEqual(data, "Here is some text #1.")
+
+
+ @inlineCallbacks
+ def test_migration(self):
+ """
+ Full migration cycle.
+ """
+
+ yield self.preCheck()
+
+ # Step 1. Live full sync
+ yield self.initialState()
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.sync()
+
+ # Step 2. Live incremental sync
+ yield self.secondState()
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.sync()
+
+ # Step 3. Disable home after final changes
+ yield self.finalState()
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+ yield syncer.disableRemoteHome()
+
+ # Step 4. Final incremental sync
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
+ yield syncer.sync()
+
+ # Step 5. Final reconcile sync
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
+ yield syncer.finalSync()
+
+ # Step 6. Enable new home
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
+ yield syncer.enableLocalHome()
+
+ # Step 7. Remove old home
+ syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01", final=True)
+ yield syncer.removeRemoteHome()
+
+ yield self.switchAccounts()
+
+ yield self.postCheck()
</ins></span></pre></div>
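<p>test_migration() above walks the complete pod-to-pod cycle in seven explicit steps: two live syncs, disabling the remote home, a final incremental sync, a reconcile pass, enabling the local home, and removing the old one. A condensed sketch of that calling sequence, using only the CrossPodHomeSync methods exercised by the test:</p>
<pre>
# Condensed form of the sequence in test_migration(); assumes only the
# CrossPodHomeSync methods exercised there.
from twisted.internet.defer import inlineCallbacks
from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync

@inlineCallbacks
def migrateHome(newPodStore, uid):
    # Live syncs: repeatable while the user is still active on the old pod.
    yield CrossPodHomeSync(newPodStore, uid).sync()
    yield CrossPodHomeSync(newPodStore, uid).sync()

    # Cut-over: disable the old (remote) home, then catch up and reconcile.
    yield CrossPodHomeSync(newPodStore, uid).disableRemoteHome()
    yield CrossPodHomeSync(newPodStore, uid, final=True).sync()
    yield CrossPodHomeSync(newPodStore, uid, final=True).finalSync()

    # Activate the migrated home on this pod and purge the old one.
    yield CrossPodHomeSync(newPodStore, uid, final=True).enableLocalHome()
    yield CrossPodHomeSync(newPodStore, uid, final=True).removeRemoteHome()
</pre>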
<a id="CalendarServertrunktxdavcommondatastorepoddingrequestpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/request.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/request.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/request.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -23,7 +23,7 @@
</span><span class="cx"> from txweb2.client.http import HTTPClientProtocol, ClientRequest
</span><span class="cx"> from txweb2.dav.util import allDataFromStream
</span><span class="cx"> from txweb2.http_headers import Headers, MimeType
</span><del>-from txweb2.stream import MemoryStream
</del><ins>+from txweb2.stream import MemoryStream, readStream
</ins><span class="cx">
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><span class="cx"> from twisted.internet.protocol import Factory
</span><span class="lines">@@ -50,11 +50,12 @@
</span><span class="cx"> case the JSON data is sent in an HTTP header.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- def __init__(self, server, data, stream=None, stream_type=None):
</del><ins>+ def __init__(self, server, data, stream=None, stream_type=None, writeStream=None):
</ins><span class="cx"> self.server = server
</span><span class="cx"> self.data = json.dumps(data)
</span><span class="cx"> self.stream = stream
</span><span class="cx"> self.streamType = stream_type
</span><ins>+ self.writeStream = writeStream
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -72,7 +73,28 @@
</span><span class="cx"> self.loggedResponse = yield self.logResponse(response)
</span><span class="cx"> emitAccounting("xPod", "", self.loggedRequest + "\n" + self.loggedResponse, "POST")
</span><span class="cx">
</span><del>- if response.code in (responsecode.OK, responsecode.BAD_REQUEST,):
</del><ins>+ if response.code == responsecode.OK:
+ if self.writeStream is None:
+ data = (yield allDataFromStream(response.stream))
+ data = json.loads(data)
+ else:
+ yield readStream(response.stream, self.writeStream.write)
+ content_type = response.headers.getHeader("content-type")
+ if content_type is None:
+ content_type = MimeType("application", "octet-stream")
+ content_disposition = response.headers.getHeader("content-disposition")
+ if content_disposition is None or "filename" not in content_disposition.params:
+ filename = ""
+ else:
+ filename = content_disposition.params["filename"]
+ self.writeStream.resetDetails(content_type, filename)
+ yield self.writeStream.loseConnection()
+ data = {
+ "result": "ok",
+ "content-type": content_type,
+ "name": filename,
+ }
+ elif response.code == responsecode.BAD_REQUEST:
</ins><span class="cx"> data = (yield allDataFromStream(response.stream))
</span><span class="cx"> data = json.loads(data)
</span><span class="cx"> else:
</span></span></pre></div>
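<p>The request.py change adds an optional writeStream argument: when the remote pod answers a streaming action with 200 OK, the response body is piped into that sink via readStream() instead of being decoded as JSON, the sink is told the content type and filename, and it is then closed. The code above only requires the sink to provide write(), resetDetails(content_type, filename) and loseConnection(). A hedged sketch of such a sink; the class below and the commented usage (including the ConduitRequest name) are illustrative assumptions, not part of the module:</p>
<pre>
# Illustrative sink only: the three methods below are exactly the calls that
# the writeStream branch above makes (write, resetDetails, loseConnection).
from twisted.internet.defer import succeed

class MemoryWriteStream(object):
    def __init__(self):
        self.chunks = []
        self.contentType = None
        self.name = None

    def write(self, data):
        self.chunks.append(data)

    def resetDetails(self, content_type, filename):
        self.contentType = content_type
        self.name = filename

    def loseConnection(self):
        return succeed(None)

# Hypothetical usage (the request class name and the method that issues the
# request are not shown in this hunk, so this part stays a comment):
#   sink = MemoryWriteStream()
#   request = ConduitRequest(server, {"action": "..."}, writeStream=sink)
#   ... issue the request ...
#   attachmentData = "".join(sink.chunks)
</pre>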
<a id="CalendarServertrunktxdavcommondatastorepoddingresourcepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/resource.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/resource.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/resource.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,9 +18,11 @@
</span><span class="cx"> from txweb2.dav.noneprops import NonePropertyStore
</span><span class="cx"> from txweb2.dav.util import allDataFromStream
</span><span class="cx"> from txweb2.http import Response, HTTPError, StatusResponse, JSONResponse
</span><del>-from txweb2.http_headers import MimeType
</del><ins>+from txweb2.http_headers import MimeType, MimeDisposition
+from txweb2.stream import ProducerStream
</ins><span class="cx">
</span><span class="cx"> from twisted.internet.defer import succeed, returnValue, inlineCallbacks
</span><ins>+from twisted.internet.protocol import Protocol
</ins><span class="cx">
</span><span class="cx"> from twistedcaldav.extensions import DAVResource, \
</span><span class="cx"> DAVResourceWithoutChildrenMixin
</span><span class="lines">@@ -154,19 +156,54 @@
</span><span class="cx"> request.extendedLogItems = {}
</span><span class="cx"> request.extendedLogItems["xpod"] = j["action"] if "action" in j else "unknown"
</span><span class="cx">
</span><del>- # Get the conduit to process the data
- try:
- result = yield self.store.conduit.processRequest(j)
- code = responsecode.OK if result["result"] == "ok" else responsecode.BAD_REQUEST
- except Exception as e:
- # Send the exception over to the other side
- result = {
- "result": "exception",
- "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
- "request": str(e),
- }
- code = responsecode.BAD_REQUEST
</del><ins>+ # Look for a streaming action which needs special handling
+ if self.store.conduit.isStreamAction(j):
+ # Get the conduit to process the data stream
+ try:
</ins><span class="cx">
</span><ins>+ stream = ProducerStream()
+ class StreamProtocol(Protocol):
+ def connectionMade(self):
+ stream.registerProducer(self.transport, False)
+ def dataReceived(self, data):
+ stream.write(data)
+ def connectionLost(self, reason):
+ stream.finish()
+
+ result = yield self.store.conduit.processRequestStream(j, StreamProtocol())
+
+ try:
+ ct, name = result
+ except ValueError:
+ code = responsecode.BAD_REQUEST
+ else:
+ headers = {"content-type": MimeType.fromString(ct)}
+ headers["content-disposition"] = MimeDisposition("attachment", params={"filename": name})
+ returnValue(Response(responsecode.OK, headers, stream))
+
+ except Exception as e:
+ # Send the exception over to the other side
+ result = {
+ "result": "exception",
+ "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
+ "details": str(e),
+ }
+ code = responsecode.BAD_REQUEST
+
+ else:
+ # Get the conduit to process the data
+ try:
+ result = yield self.store.conduit.processRequest(j)
+ code = responsecode.OK if result["result"] == "ok" else responsecode.BAD_REQUEST
+ except Exception as e:
+ # Send the exception over to the other side
+ result = {
+ "result": "exception",
+ "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
+ "details": str(e),
+ }
+ code = responsecode.BAD_REQUEST
+
</ins><span class="cx"> response = JSONResponse(code, result)
</span><span class="cx"> returnValue(response)
</span><span class="cx">
</span></span></pre></div>
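<p>In resource.py, a streaming conduit action is answered by registering the incoming transport as the producer of a ProducerStream and writing each received chunk into it, so the HTTP response body is streamed while the cross-pod data is still arriving. A standalone sketch of that bridging pattern, using only the classes imported in the diff above:</p>
<pre>
# Sketch of the Protocol-to-ProducerStream bridge used in the streaming branch:
# bytes handed to the protocol become the body of a streamed HTTP response.
from twisted.internet.protocol import Protocol
from txweb2 import responsecode
from txweb2.http import Response
from txweb2.stream import ProducerStream

def streamingResponse():
    stream = ProducerStream()

    class StreamProtocol(Protocol):
        def connectionMade(self):
            # Let HTTP backpressure pause/resume whatever feeds this protocol.
            stream.registerProducer(self.transport, False)

        def dataReceived(self, data):
            stream.write(data)

        def connectionLost(self, reason):
            stream.finish()

    # Hand StreamProtocol() to the data producer and return the Response at
    # once; the body fills in as data arrives on the protocol.
    return Response(responsecode.OK, {}, stream), StreamProtocol()
</pre>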
<a id="CalendarServertrunktxdavcommondatastorepoddingsharing_invitespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/sharing_invites.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -27,9 +27,8 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def send_shareinvite(
</span><del>- self, txn, homeType, ownerUID, ownerID, ownerName,
- shareeUID, shareUID, bindMode,
- summary, copy_properties, supported_components
</del><ins>+ self, txn, homeType, ownerUID, ownerName, shareeUID, shareUID,
+ bindMode, bindUID, summary, copy_properties, supported_components
</ins><span class="cx"> ):
</span><span class="cx"> """
</span><span class="cx"> Send a sharing invite cross-pod message.
</span><span class="lines">@@ -40,9 +39,6 @@
</span><span class="cx"> @param ownerUID: UID of the sharer.
</span><span class="cx"> @type ownerUID: C{str}
</span><span class="cx">
</span><del>- @param ownerID: resource ID of the sharer calendar
- @type ownerID: C{int}
-
</del><span class="cx"> @param ownerName: owner's name of the sharer calendar
</span><span class="cx"> @type ownerName: C{str}
</span><span class="cx">
</span><span class="lines">@@ -54,7 +50,8 @@
</span><span class="cx">
</span><span class="cx"> @param bindMode: bind mode for the share
</span><span class="cx"> @type bindMode: C{str}
</span><del>-
</del><ins>+ @param bindUID: bind UID of the sharer calendar
+ @type bindUID: C{str}
</ins><span class="cx"> @param summary: sharing message
</span><span class="cx"> @type summary: C{str}
</span><span class="cx">
</span><span class="lines">@@ -73,11 +70,11 @@
</span><span class="cx"> "action": "shareinvite",
</span><span class="cx"> "type": homeType,
</span><span class="cx"> "owner": ownerUID,
</span><del>- "owner_id": ownerID,
</del><span class="cx"> "owner_name": ownerName,
</span><span class="cx"> "sharee": shareeUID,
</span><span class="cx"> "share_id": shareUID,
</span><span class="cx"> "mode": bindMode,
</span><ins>+ "bind_uid": bindUID,
</ins><span class="cx"> "summary": summary,
</span><span class="cx"> "properties": copy_properties,
</span><span class="cx"> }
</span><span class="lines">@@ -107,10 +104,10 @@
</span><span class="cx"> # Create a share
</span><span class="cx"> yield shareeHome.processExternalInvite(
</span><span class="cx"> request["owner"],
</span><del>- request["owner_id"],
</del><span class="cx"> request["owner_name"],
</span><span class="cx"> request["share_id"],
</span><span class="cx"> request["mode"],
</span><ins>+ request["bind_uid"],
</ins><span class="cx"> request["summary"],
</span><span class="cx"> request["properties"],
</span><span class="cx"> supported_components=request.get("supported-components")
</span><span class="lines">@@ -119,7 +116,8 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def send_shareuninvite(
</span><del>- self, txn, homeType, ownerUID, ownerID, shareeUID, shareUID
</del><ins>+ self, txn, homeType, ownerUID,
+ bindUID, shareeUID, shareUID
</ins><span class="cx"> ):
</span><span class="cx"> """
</span><span class="cx"> Send a sharing uninvite cross-pod message.
</span><span class="lines">@@ -129,10 +127,9 @@
</span><span class="cx">
</span><span class="cx"> @param ownerUID: UID of the sharer.
</span><span class="cx"> @type ownerUID: C{str}
</span><ins>+ @param bindUID: bind UID of the sharer calendar
+ @type bindUID: C{str}
</ins><span class="cx">
</span><del>- @param ownerID: resource ID of the sharer calendar
- @type ownerID: C{int}
-
</del><span class="cx"> @param shareeUID: UID of the sharee
</span><span class="cx"> @type shareeUID: C{str}
</span><span class="cx">
</span><span class="lines">@@ -148,7 +145,7 @@
</span><span class="cx"> "action": "shareuninvite",
</span><span class="cx"> "type": homeType,
</span><span class="cx"> "owner": ownerUID,
</span><del>- "owner_id": ownerID,
</del><ins>+ "bind_uid": bindUID,
</ins><span class="cx"> "sharee": shareeUID,
</span><span class="cx"> "share_id": shareUID,
</span><span class="cx"> }
</span><span class="lines">@@ -174,7 +171,7 @@
</span><span class="cx"> # Remove a share
</span><span class="cx"> yield shareeHome.processExternalUninvite(
</span><span class="cx"> request["owner"],
</span><del>- request["owner_id"],
</del><ins>+ request["bind_uid"],
</ins><span class="cx"> request["share_id"],
</span><span class="cx"> )
</span><span class="cx">
</span></span></pre></div>
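<p>The sharing_invites.py change replaces the sharer's numeric owner_id with a textual bind_uid in both the invite and uninvite cross-pod messages, so the sharee pod no longer depends on the owner's resource ID. A sketch of the resulting message shapes; every value below is a placeholder, only the keys mirror the code above:</p>
<pre>
# Placeholder values only; the keys mirror what send_shareinvite() and
# send_shareuninvite() now build, with bind_uid replacing the old owner_id.
shareinvite_message = {
    "action": "shareinvite",
    "type": "calendar-home-type",      # homeType value passed by the caller
    "owner": "owner-uid",
    "owner_name": "calendar",
    "sharee": "sharee-uid",
    "share_id": "invite-uid",
    "mode": "bind-mode",
    "bind_uid": "bind-uid-of-owner-calendar",
    "summary": "shared calendar",
    "properties": {},
}

shareuninvite_message = {
    "action": "shareuninvite",
    "type": "calendar-home-type",
    "owner": "owner-uid",
    "bind_uid": "bind-uid-of-owner-calendar",
    "sharee": "sharee-uid",
    "share_id": "invite-uid",
}
</pre>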
<a id="CalendarServertrunktxdavcommondatastorepoddingstore_apipy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/store_api.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/store_api.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/store_api.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -16,8 +16,9 @@
</span><span class="cx">
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><span class="cx">
</span><del>-from txdav.common.datastore.podding.base import FailedCrossPodRequestError
</del><span class="cx"> from txdav.caldav.datastore.scheduling.freebusy import generateFreeBusyInfo
</span><ins>+from txdav.common.datastore.podding.util import UtilityConduitMixin
+from txdav.common.datastore.sql_tables import _HOME_STATUS_DISABLED
</ins><span class="cx">
</span><span class="cx"> from twistedcaldav.caldavxml import TimeRange
</span><span class="cx">
</span><span class="lines">@@ -27,145 +28,21 @@
</span><span class="cx"> Defines common cross-pod API for generic access to remote resources.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- #
- # Utility methods to map from store objects to/from JSON
- #
-
</del><span class="cx"> @inlineCallbacks
</span><del>- def _getRequestForStoreObject(self, action, storeObject, classMethod):
</del><ins>+ def send_home_resource_id(self, txn, recipient, migrating=False):
</ins><span class="cx"> """
</span><del>- Create the JSON data needed to identify the remote resource by type and
- ids, along with any parent resources.
-
- @param action: the conduit action name
- @type action: L{str}
-
- @param storeObject: the store object that is being operated on
- @type storeObject: L{object}
-
- @param classMethod: indicates whether the method being called is a
- classmethod
- @type classMethod: L{bool}
-
- @return: the transaction in use, the JSON dict to send in the request,
- the server where the request should be sent
- @rtype: L{tuple} of (L{CommonStoreTransaction}, L{dict}, L{str})
- """
-
- from txdav.common.datastore.sql import (
- CommonObjectResource, CommonHomeChild, CommonHome
- )
-
- result = {
- "action": action,
- }
-
- # Extract the relevant store objects
- txn = storeObject._txn
- owner_home = None
- viewer_home = None
- home_child = None
- object_resource = None
-
- if isinstance(storeObject, CommonObjectResource):
- owner_home = storeObject.ownerHome()
- viewer_home = storeObject.viewerHome()
- home_child = storeObject.parentCollection()
- object_resource = storeObject
- elif isinstance(storeObject, CommonHomeChild):
- owner_home = storeObject.ownerHome()
- viewer_home = storeObject.viewerHome()
- home_child = storeObject
- result["classMethod"] = classMethod
- elif isinstance(storeObject, CommonHome):
- owner_home = storeObject
- viewer_home = storeObject
- txn = storeObject._txn
- result["classMethod"] = classMethod
-
- # Add store object identities to JSON request
- result["homeType"] = viewer_home._homeType
- result["homeUID"] = viewer_home.uid()
- if home_child:
- if home_child.owned():
- result["homeChildID"] = home_child.id()
- else:
- result["homeChildSharedID"] = home_child.name()
- if object_resource:
- result["objectResourceID"] = object_resource.id()
-
- # Note that the owner_home is always the ownerHome() because in the
- # sharing case a viewer is accessing the owner's data on another pod.
- directory = self.store.directoryService()
- recipient = yield directory.recordWithUID(owner_home.uid())
-
- returnValue((txn, result, recipient.server(),))
-
-
- @inlineCallbacks
- def _getStoreObjectForRequest(self, txn, request):
- """
- Resolve the supplied JSON data to get a store object to operate on.
- """
-
- returnObject = txn
- classObject = None
-
- if "homeUID" in request:
- home = yield txn.homeWithUID(
- request["homeType"], request["homeUID"]
- )
- if home is None:
- raise FailedCrossPodRequestError("Invalid owner UID specified")
- home._internalRequest = False
- returnObject = home
- if request.get("classMethod", False):
- classObject = home._childClass
-
- if "homeChildID" in request:
- homeChild = yield home.childWithID(request["homeChildID"])
- if homeChild is None:
- raise FailedCrossPodRequestError(
- "Invalid home child specified"
- )
- returnObject = homeChild
- if request.get("classMethod", False):
- classObject = homeChild._objectResourceClass
- elif "homeChildSharedID" in request:
- homeChild = yield home.childWithName(request["homeChildSharedID"])
- if homeChild is None:
- raise FailedCrossPodRequestError(
- "Invalid home child specified"
- )
- returnObject = homeChild
- if request.get("classMethod", False):
- classObject = homeChild._objectResourceClass
-
- if "objectResourceID" in request:
- objectResource = yield homeChild.objectResourceWithID(
- request["objectResourceID"]
- )
- if objectResource is None:
- raise FailedCrossPodRequestError(
- "Invalid object resource specified"
- )
- returnObject = objectResource
-
- returnValue((returnObject, classObject,))
-
-
- @inlineCallbacks
- def send_home_resource_id(self, txn, recipient):
- """
</del><span class="cx"> Lookup the remote resourceID matching the specified directory uid.
</span><span class="cx">
</span><span class="cx"> @param ownerUID: directory record for user whose home is needed
</span><span class="cx"> @type ownerUID: L{DirectroryRecord}
</span><ins>+ @param migrating: if L{True} then also return a disabled home
+ @type migrating: L{bool}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> request = {
</span><span class="cx"> "action": "home-resource_id",
</span><span class="cx"> "ownerUID": recipient.uid,
</span><ins>+ "migrating": migrating,
</ins><span class="cx"> }
</span><span class="cx">
</span><span class="cx"> response = yield self.sendRequest(txn, recipient, request)
</span><span class="lines">@@ -175,14 +52,15 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def recv_home_resource_id(self, txn, request):
</span><span class="cx"> """
</span><del>- Process an addAttachment cross-pod request.
- Request arguments as per L{send_add_attachment}.
</del><ins>+ Process a home-resource_id cross-pod request. Request arguments as per L{send_home_resource_id}.
</ins><span class="cx">
</span><span class="cx"> @param request: request arguments
</span><span class="cx"> @type request: C{dict}
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> home = yield txn.calendarHomeWithUID(request["ownerUID"])
</span><ins>+ if home is None and request["migrating"]:
+ home = yield txn.calendarHomeWithUID(request["ownerUID"], status=_HOME_STATUS_DISABLED)
</ins><span class="cx"> returnValue(home.id() if home is not None else None)
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -200,18 +78,12 @@
</span><span class="cx"> event_details,
</span><span class="cx"> ):
</span><span class="cx"> """
</span><del>- Request free busy information for a shared calendar collection hosted
- on a different pod.
- See L{txdav.caldav.datastore.scheduling.freebusy} for the base free
- busy lookup behavior.
</del><ins>+ Request free busy information for a shared calendar collection hosted on a different pod. See
+ L{txdav.caldav.datastore.scheduling.freebusy} for the base free busy lookup behavior.
</ins><span class="cx"> """
</span><del>- txn, request, server = yield self._getRequestForStoreObject(
- "freebusy", calresource, False
- )
</del><ins>+ txn, request, server = yield self._getRequestForStoreObject("freebusy", calresource, False)
</ins><span class="cx">
</span><del>- request["timerange"] = [
- timerange.start.getText(), timerange.end.getText()
- ]
</del><ins>+ request["timerange"] = [timerange.start.getText(), timerange.end.getText()]
</ins><span class="cx"> request["matchtotal"] = matchtotal
</span><span class="cx"> request["excludeuid"] = excludeuid
</span><span class="cx"> request["organizer"] = organizer
</span><span class="lines">@@ -227,26 +99,20 @@
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def recv_freebusy(self, txn, request):
</span><span class="cx"> """
</span><del>- Process a freebusy cross-pod request.
- Message arguments as per L{send_freebusy}.
</del><ins>+ Process a freebusy cross-pod request. Message arguments as per L{send_freebusy}.
</ins><span class="cx">
</span><span class="cx"> @param request: request arguments
</span><span class="cx"> @type request: C{dict}
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> # Operate on the L{CommonHomeChild}
</span><del>- calresource, _ignore = yield self._getStoreObjectForRequest(
- txn, request
- )
</del><ins>+ calresource, _ignore = yield self._getStoreObjectForRequest(txn, request)
</ins><span class="cx">
</span><span class="cx"> fbinfo = [[], [], []]
</span><span class="cx"> matchtotal = yield generateFreeBusyInfo(
</span><span class="cx"> calresource,
</span><span class="cx"> fbinfo,
</span><del>- TimeRange(
- start=request["timerange"][0],
- end=request["timerange"][1],
- ),
</del><ins>+ TimeRange(start=request["timerange"][0], end=request["timerange"][1]),
</ins><span class="cx"> request["matchtotal"],
</span><span class="cx"> request["excludeuid"],
</span><span class="cx"> request["organizer"],
</span><span class="lines">@@ -268,232 +134,63 @@
</span><span class="cx"> })
</span><span class="cx">
</span><span class="cx">
</span><del>- #
- # We can simplify code generation for simple calls by dynamically
- # generating the appropriate class methods.
- #
-
- @inlineCallbacks
- def _simple_object_send(
- self, actionName, storeObject,
- classMethod=False, transform=None, args=None, kwargs=None
- ):
</del><ins>+ @staticmethod
+ def _to_serialize_pair_list(value):
</ins><span class="cx"> """
</span><del>- A simple send operation that returns a value.
-
- @param actionName: name of the action.
- @type actionName: C{str}
-
- @param shareeView: sharee resource being operated on.
- @type shareeView: L{CommonHomeChildExternal}
-
- @param objectResource: the resource being operated on, or C{None} for
- classmethod.
- @type objectResource: L{CommonObjectResourceExternal}
-
- @param transform: a function used to convert the JSON response into
- return values.
- @type transform: C{callable}
-
- @param args: list of optional arguments.
- @type args: C{list}
-
- @param kwargs: optional keyword arguments.
- @type kwargs: C{dict}
</del><ins>+ Convert the value to the external (JSON-based) representation.
</ins><span class="cx"> """
</span><ins>+ return [[a.serialize(), b.serialize(), ] for a, b in value]
</ins><span class="cx">
</span><del>- txn, request, server = yield self._getRequestForStoreObject(
- actionName, storeObject, classMethod
- )
- if args is not None:
- request["arguments"] = args
- if kwargs is not None:
- request["keywords"] = kwargs
- response = yield self.sendRequestToServer(txn, server, request)
- returnValue(transform(response) if transform is not None else response)
</del><span class="cx">
</span><del>-
- @inlineCallbacks
- def _simple_object_recv(
- self, txn, actionName, request, method, transform=None
- ):
- """
- A simple recv operation that returns a value.
- We also look for an optional set of arguments/keywords and include
- those only if present.
-
- @param actionName: name of the action.
- @type actionName: C{str}
-
- @param request: request arguments
- @type request: C{dict}
-
- @param method: name of the method to execute on the shared resource to
- get the result.
- @type method: C{str}
-
- @param transform: method to call on returned JSON value to convert it
- to something useful.
- @type transform: C{callable}
- """
-
- storeObject, classObject = yield self._getStoreObjectForRequest(
- txn, request
- )
-
- if classObject is not None:
- m = getattr(classObject, method)
- value = yield m(
- storeObject,
- *request.get("arguments", ()),
- **request.get("keywords", {})
- )
- else:
- m = getattr(storeObject, method)
- value = yield m(
- *request.get("arguments", ()),
- **request.get("keywords", {})
- )
-
- returnValue(transform(value) if transform is not None else value)
-
-
- #
- # Factory methods for binding actions to the conduit class
- #
- @classmethod
- def _make_simple_action(
- cls, action, method, classMethod=False,
- transform_recv_result=None, transform_send_result=None
- ):
- setattr(
- cls,
- "send_{}".format(action),
- lambda self, storeObject, *args, **kwargs:
- self._simple_object_send(
- action, storeObject, classMethod=classMethod,
- transform=transform_send_result, args=args, kwargs=kwargs
- )
- )
- setattr(
- cls,
- "recv_{}".format(action),
- lambda self, txn, message:
- self._simple_object_recv(
- txn, action, message, method,
- transform=transform_recv_result
- )
- )
-
-
- #
- # Transforms for returned data
- #
</del><span class="cx"> @staticmethod
</span><del>- def _to_externalize(value):
</del><ins>+ def _to_serialize_dict_value(value):
</ins><span class="cx"> """
</span><span class="cx"> Convert the value to the external (JSON-based) representation.
</span><span class="cx"> """
</span><del>- return value.externalize() if value is not None else None
</del><ins>+ return dict([(k, v.serialize(),) for k, v in value.items()])
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @staticmethod
</span><del>- def _to_externalize_list(value):
</del><ins>+ def _to_serialize_dict_list_serialized_value(value):
</ins><span class="cx"> """
</span><span class="cx"> Convert the value to the external (JSON-based) representation.
</span><span class="cx"> """
</span><del>- return [v.externalize() for v in value]
</del><ins>+ return dict([(k, UtilityConduitMixin._to_serialize_list(v),) for k, v in value.items()])
</ins><span class="cx">
</span><del>-
- @staticmethod
- def _to_string(value):
- return str(value)
-
-
- @staticmethod
- def _to_tuple(value):
- return tuple(value)
-
</del><span class="cx"> # These are the actions on store objects we need to expose via the conduit api
</span><span class="cx">
</span><span class="cx"> # Calls on L{CommonHome} objects
</span><ins>+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "home_metadata", "serialize")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "home_set_status", "setStatus")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "home_get_all_group_attendees", "getAllGroupAttendees", transform_recv_result=StoreAPIConduitMixin._to_serialize_pair_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "home_shared_to_records", "sharedToBindRecords", transform_recv_result=StoreAPIConduitMixin._to_serialize_dict_list_serialized_value)
</ins><span class="cx">
</span><span class="cx"> # Calls on L{CommonHomeChild} objects
</span><del>-StoreAPIConduitMixin._make_simple_action(
- "homechild_listobjects", "listObjects",
- classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_loadallobjects", "loadAllObjects",
- classMethod=True,
- transform_recv_result=StoreAPIConduitMixin._to_externalize_list
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_objectwith", "objectWith",
- classMethod=True,
- transform_recv_result=StoreAPIConduitMixin._to_externalize
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_movehere", "moveObjectResourceHere"
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_moveaway", "moveObjectResourceAway"
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_synctoken", "syncToken"
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_resourcenamessincerevision", "resourceNamesSinceRevision",
- transform_send_result=StoreAPIConduitMixin._to_tuple
-)
-StoreAPIConduitMixin._make_simple_action(
- "homechild_search", "search"
-)
</del><ins>+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_listobjects", "listObjects", classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_loadallobjects", "loadAllObjects", classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_objectwith", "objectWith", classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_movehere", "moveObjectResourceHere")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_moveaway", "moveObjectResourceAway")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_synctokenrevision", "syncTokenRevision")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_resourcenamessincerevision", "resourceNamesSinceRevision", transform_send_result=UtilityConduitMixin._to_tuple)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_search", "search")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_sharing_records", "sharingBindRecords", transform_recv_result=StoreAPIConduitMixin._to_serialize_dict_value)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_migrate_sharing_records", "migrateBindRecords")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_group_sharees", "groupSharees", transform_recv_result=StoreAPIConduitMixin._to_serialize_dict_list_serialized_value)
</ins><span class="cx">
</span><span class="cx"> # Calls on L{CommonObjectResource} objects
</span><del>-StoreAPIConduitMixin._make_simple_action(
- "objectresource_loadallobjects", "loadAllObjects",
- classMethod=True,
- transform_recv_result=StoreAPIConduitMixin._to_externalize_list
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_loadallobjectswithnames", "loadAllObjectsWithNames",
- classMethod=True,
- transform_recv_result=StoreAPIConduitMixin._to_externalize_list
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_listobjects", "listObjects",
- classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_countobjects", "countObjects",
- classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_objectwith", "objectWith",
- classMethod=True,
- transform_recv_result=StoreAPIConduitMixin._to_externalize
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_resourcenameforuid", "resourceNameForUID",
- classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_resourceuidforname", "resourceUIDForName",
- classMethod=True
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_create", "create",
- classMethod=True,
- transform_recv_result=StoreAPIConduitMixin._to_externalize
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_setcomponent", "setComponent"
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_component", "component",
- transform_recv_result=StoreAPIConduitMixin._to_string
-)
-StoreAPIConduitMixin._make_simple_action(
- "objectresource_remove", "remove"
-)
</del><ins>+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_loadallobjects", "loadAllObjects", classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_loadallobjectswithnames", "loadAllObjectsWithNames", classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize_list)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_listobjects", "listObjects", classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_countobjects", "countObjects", classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_objectwith", "objectWith", classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_resourcenameforuid", "resourceNameForUID", classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_resourceuidforname", "resourceUIDForName", classMethod=True)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_create", "create", classMethod=True, transform_recv_result=UtilityConduitMixin._to_serialize)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_setcomponent", "setComponent")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_component", "component", transform_recv_result=UtilityConduitMixin._to_string)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "objectresource_remove", "remove")
+
+# Calls on L{NotificationCollection} objects
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "notification_set_status", "setStatus")
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "notification_all_records", "notificationObjectRecords", transform_recv_result=UtilityConduitMixin._to_serialize_list)
</ins></span></pre></div>
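<p>The bindings above attach a send_*/recv_* method pair to StoreAPIConduitMixin for each conduit action. Below is a minimal, self-contained sketch of that setattr/lambda binding pattern; DemoConduit, DemoHome and make_simple_action are illustrative stand-ins and are not part of the CalendarServer API.</p>
<pre>
# Standalone sketch of the action-binding pattern used by
# UtilityConduitMixin._make_simple_action above. All names here are
# illustrative stand-ins, not CalendarServer classes.

class DemoHome(object):
    """Stand-in for a store object exposing a simple accessor."""
    def __init__(self, uid):
        self.uid = uid

    def metadata(self):
        return {"uid": self.uid}


class DemoConduit(object):
    """Stand-in conduit: send_* packages a request, recv_* dispatches it."""

    def _simple_object_send(self, action, storeObject):
        # The real conduit serializes the request to JSON and sends it to
        # the remote pod; here the recv_* side is invoked locally.
        request = {"action": action, "uid": storeObject.uid}
        return getattr(self, "recv_{}".format(action))(request, storeObject)

    def _simple_object_recv(self, request, storeObject, method):
        # Resolve the store object named by the request, then call method.
        return getattr(storeObject, method)()


def make_simple_action(bindcls, action, method):
    # Attach a send_/recv_ pair named after the action to the target class,
    # mirroring UtilityConduitMixin._make_simple_action.
    setattr(
        bindcls,
        "send_{}".format(action),
        lambda self, storeObject: self._simple_object_send(action, storeObject),
    )
    setattr(
        bindcls,
        "recv_{}".format(action),
        lambda self, request, storeObject: self._simple_object_recv(request, storeObject, method),
    )


make_simple_action(DemoConduit, "home_metadata", "metadata")

if __name__ == "__main__":
    conduit = DemoConduit()
    print(conduit.send_home_metadata(DemoHome("user01")))  # {'uid': 'user01'}
</pre>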
<a id="CalendarServertrunktxdavcommondatastorepoddingtesttest_conduitpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/test/test_conduit.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -32,7 +32,7 @@
</span><span class="cx"> from txdav.caldav.datastore.query.filter import Filter
</span><span class="cx"> from txdav.caldav.datastore.scheduling.freebusy import generateFreeBusyInfo
</span><span class="cx"> from txdav.caldav.datastore.scheduling.ischedule.localservers import ServersDB, Server
</span><del>-from txdav.caldav.datastore.sql import ManagedAttachment
</del><ins>+from txdav.caldav.datastore.sql import ManagedAttachment, AttachmentLink
</ins><span class="cx"> from txdav.caldav.datastore.test.common import CaptureProtocol
</span><span class="cx"> from txdav.common.datastore.podding.conduit import PoddingConduit, \
</span><span class="cx"> FailedCrossPodRequestError
</span><span class="lines">@@ -362,11 +362,11 @@
</span><span class="cx"> yield self.createShare("user01", "puser01")
</span><span class="cx">
</span><span class="cx"> calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
</span><del>- token1_1 = yield calendar1.syncToken()
</del><ins>+ token1_1 = yield calendar1.syncTokenRevision()
</ins><span class="cx"> yield self.commitTransaction(0)
</span><span class="cx">
</span><span class="cx"> shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
</span><del>- token2_1 = yield shared.syncToken()
</del><ins>+ token2_1 = yield shared.syncTokenRevision()
</ins><span class="cx"> yield self.commitTransaction(1)
</span><span class="cx">
</span><span class="cx"> self.assertEqual(token1_1, token2_1)
</span><span class="lines">@@ -376,11 +376,11 @@
</span><span class="cx"> yield self.commitTransaction(0)
</span><span class="cx">
</span><span class="cx"> calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
</span><del>- token1_2 = yield calendar1.syncToken()
</del><ins>+ token1_2 = yield calendar1.syncTokenRevision()
</ins><span class="cx"> yield self.commitTransaction(0)
</span><span class="cx">
</span><span class="cx"> shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
</span><del>- token2_2 = yield shared.syncToken()
</del><ins>+ token2_2 = yield shared.syncTokenRevision()
</ins><span class="cx"> yield self.commitTransaction(1)
</span><span class="cx">
</span><span class="cx"> self.assertNotEqual(token1_1, token1_2)
</span><span class="lines">@@ -394,11 +394,11 @@
</span><span class="cx"> yield self.commitTransaction(0)
</span><span class="cx">
</span><span class="cx"> calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
</span><del>- token1_3 = yield calendar1.syncToken()
</del><ins>+ token1_3 = yield calendar1.syncTokenRevision()
</ins><span class="cx"> yield self.commitTransaction(0)
</span><span class="cx">
</span><span class="cx"> shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
</span><del>- token2_3 = yield shared.syncToken()
</del><ins>+ token2_3 = yield shared.syncTokenRevision()
</ins><span class="cx"> yield self.commitTransaction(1)
</span><span class="cx">
</span><span class="cx"> self.assertNotEqual(token1_1, token1_3)
</span><span class="lines">@@ -1056,3 +1056,83 @@
</span><span class="cx"> attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
</span><span class="cx"> self.assertTrue(attachment is None)
</span><span class="cx"> yield self.commitTransaction(0)
</span><ins>+
+
+ @inlineCallbacks
+ def test_get_all_attachments(self):
+ """
+ Test that action=get-all-attachments works.
+ """
+
+ yield self.createShare("user01", "puser01")
+
+ calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
+ yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
+ yield self.commitTransaction(0)
+
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
+ yield self.commitTransaction(0)
+
+ shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
+ attachments = yield shared_object.ownerHome().getAllAttachments()
+ self.assertEqual(len(attachments), 1)
+ self.assertTrue(isinstance(attachments[0], ManagedAttachment))
+ self.assertEqual(attachments[0].contentType(), MimeType.fromString("text/plain"))
+ self.assertEqual(attachments[0].name(), "test.txt")
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_get_attachment_data(self):
+ """
+ Test that attachment data can be read across pods.
+ """
+
+ yield self.createShare("user01", "puser01")
+
+ calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
+ yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
+ yield self.commitTransaction(0)
+
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
+ remote_id = attachment.id()
+ yield self.commitTransaction(0)
+
+ home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser01")
+ shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
+ attachment = yield ManagedAttachment._create(self.theTransactionUnderTest(1), None, home1.id())
+ attachment._contentType = MimeType.fromString("text/plain")
+ attachment._name = "test.txt"
+ yield shared_object.ownerHome().readAttachmentData(remote_id, attachment)
+ yield self.commitTransaction(1)
+
+
+ @inlineCallbacks
+ def test_get_attachment_links(self):
+ """
+ Test that action=get-attachment-links works.
+ """
+
+ yield self.createShare("user01", "puser01")
+
+ calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
+ cobj1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
+ calobjID = cobj1.id()
+ yield self.commitTransaction(0)
+
+ object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
+ attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
+ attID = attachment.id()
+ managedID = attachment.managedID()
+ yield self.commitTransaction(0)
+
+ shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
+ links = yield shared_object.ownerHome().getAttachmentLinks()
+ self.assertEqual(len(links), 1)
+ self.assertTrue(isinstance(links[0], AttachmentLink))
+ self.assertEqual(links[0]._attachmentID, attID)
+ self.assertEqual(links[0]._managedID, managedID)
+ self.assertEqual(links[0]._calendarObjectID, calobjID)
+ yield self.commitTransaction(1)
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingtesttest_store_apipy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/test/test_store_api.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -104,8 +104,8 @@
</span><span class="cx">
</span><span class="cx"> from txdav.caldav.datastore.sql_external import CalendarHomeExternal
</span><span class="cx"> recipient = yield txn.store().directoryService().recordWithUID(uid)
</span><del>- resourceID = yield txn.store().conduit.send_home_resource_id(self, recipient)
- home = CalendarHomeExternal(txn, recipient.uid, resourceID) if resourceID is not None else None
</del><ins>+ resourceID = yield txn.store().conduit.send_home_resource_id(txn, recipient)
+ home = CalendarHomeExternal.makeSyntheticExternalHome(txn, recipient.uid, resourceID) if resourceID is not None else None
</ins><span class="cx"> if home:
</span><span class="cx"> home._childClass = home._childClass._externalClass
</span><span class="cx"> returnValue(home)
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastorepoddingtestutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/podding/test/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/test/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/podding/test/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -14,25 +14,33 @@
</span><span class="cx"> # limitations under the License.
</span><span class="cx"> ##
</span><span class="cx">
</span><ins>+from twisted.internet import reactor
</ins><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue
</span><ins>+from twisted.internet.protocol import Protocol
</ins><span class="cx">
</span><span class="cx"> from txdav.caldav.datastore.scheduling.ischedule.localservers import (
</span><span class="cx"> Server, ServersDB
</span><span class="cx"> )
</span><span class="cx"> from txdav.common.datastore.podding.conduit import PoddingConduit
</span><ins>+from txdav.common.datastore.podding.request import ConduitRequest
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
</span><span class="cx"> from txdav.common.datastore.test.util import (
</span><span class="cx"> CommonCommonTests, SQLStoreBuilder, buildTestDirectory
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> import txweb2.dav.test.util
</span><ins>+from txweb2 import responsecode
+from txweb2.http import Response, JSONResponse
+from txweb2.http_headers import MimeDisposition, MimeType
+from txweb2.stream import ProducerStream
</ins><span class="cx">
</span><span class="cx"> from twext.enterprise.ienterprise import AlreadyFinishedError
</span><ins>+from twext.enterprise.jobqueue import JobItem
</ins><span class="cx">
</span><span class="cx"> import json
</span><span class="cx">
</span><span class="cx">
</span><del>-class FakeConduitRequest(object):
</del><ins>+class FakeConduitRequest(ConduitRequest):
</ins><span class="cx"> """
</span><span class="cx"> A conduit request that sends messages internally rather than using HTTP
</span><span class="cx"> """
</span><span class="lines">@@ -54,28 +62,16 @@
</span><span class="cx"> cls.storeMap[server.details()] = store
</span><span class="cx">
</span><span class="cx">
</span><del>- def __init__(self, server, data, stream=None, stream_type=None):
</del><ins>+ def __init__(self, server, data, stream=None, stream_type=None, writeStream=None):
</ins><span class="cx">
</span><span class="cx"> self.server = server
</span><span class="cx"> self.data = json.dumps(data)
</span><span class="cx"> self.stream = stream
</span><span class="cx"> self.streamType = stream_type
</span><ins>+ self.writeStream = writeStream
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def doRequest(self, txn):
-
- # Generate an HTTP client request
- try:
- response = (yield self._processRequest())
- response = json.loads(response)
- except Exception as e:
- raise ValueError("Failed cross-pod request: {}".format(e))
-
- returnValue(response)
-
-
- @inlineCallbacks
</del><span class="cx"> def _processRequest(self):
</span><span class="cx"> """
</span><span class="cx"> Process the request by sending it to the relevant server.
</span><span class="lines">@@ -90,19 +86,43 @@
</span><span class="cx"> j["stream"] = self.stream
</span><span class="cx"> j["streamType"] = self.streamType
</span><span class="cx"> try:
</span><del>- result = yield store.conduit.processRequest(j)
</del><ins>+ if store.conduit.isStreamAction(j):
+ stream = ProducerStream()
+ class StreamProtocol(Protocol):
+ def connectionMade(self):
+ stream.registerProducer(self.transport, False)
+ def dataReceived(self, data):
+ stream.write(data)
+ def connectionLost(self, reason):
+ stream.finish()
+
+ result = yield store.conduit.processRequestStream(j, StreamProtocol())
+
+ try:
+ ct, name = result
+ except ValueError:
+ code = responsecode.BAD_REQUEST
+ else:
+ headers = {"content-type": MimeType.fromString(ct)}
+ headers["content-disposition"] = MimeDisposition("attachment", params={"filename": name})
+ returnValue(Response(responsecode.OK, headers, stream))
+ else:
+ result = yield store.conduit.processRequest(j)
+ code = responsecode.OK
</ins><span class="cx"> except Exception as e:
</span><span class="cx"> # Send the exception over to the other side
</span><span class="cx"> result = {
</span><span class="cx"> "result": "exception",
</span><span class="cx"> "class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
</span><del>- "request": str(e),
</del><ins>+ "details": str(e),
</ins><span class="cx"> }
</span><del>- result = json.dumps(result)
- returnValue(result)
</del><ins>+ code = responsecode.BAD_REQUEST
</ins><span class="cx">
</span><ins>+ response = JSONResponse(code, result)
+ returnValue(response)
</ins><span class="cx">
</span><span class="cx">
</span><ins>+
</ins><span class="cx"> class MultiStoreConduitTest(CommonCommonTests, txweb2.dav.test.util.TestCase):
</span><span class="cx">
</span><span class="cx"> numberOfStores = 2
</span><span class="lines">@@ -110,6 +130,8 @@
</span><span class="cx"> theStoreBuilders = []
</span><span class="cx"> theStores = []
</span><span class="cx"> activeTransactions = []
</span><ins>+ accounts = None
+ augments = None
</ins><span class="cx">
</span><span class="cx"> def __init__(self, methodName='runTest'):
</span><span class="cx"> txweb2.dav.test.util.TestCase.__init__(self, methodName)
</span><span class="lines">@@ -135,13 +157,19 @@
</span><span class="cx"> if i == 0:
</span><span class="cx"> yield self.buildStoreAndDirectory(
</span><span class="cx"> serversDB=serversDB,
</span><del>- storeBuilder=self.theStoreBuilders[i]
</del><ins>+ storeBuilder=self.theStoreBuilders[i],
+ accounts=self.accounts,
+ augments=self.augments,
</ins><span class="cx"> )
</span><span class="cx"> self.theStores[i] = self.store
</span><span class="cx"> else:
</span><span class="cx"> self.theStores[i] = yield self.buildStore(self.theStoreBuilders[i])
</span><span class="cx"> directory = buildTestDirectory(
</span><del>- self.theStores[i], self.mktemp(), serversDB=serversDB
</del><ins>+ self.theStores[i],
+ self.mktemp(),
+ serversDB=serversDB,
+ accounts=self.accounts,
+ augments=self.augments,
</ins><span class="cx"> )
</span><span class="cx"> self.theStores[i].setDirectoryService(directory)
</span><span class="cx">
</span><span class="lines">@@ -199,6 +227,12 @@
</span><span class="cx"> self.activeTransactions[count] = None
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def waitAllEmpty(self):
+ for i in range(self.numberOfStores):
+ yield JobItem.waitEmpty(self.theStoreUnderTest(i).newTransaction, reactor, 60.0)
+
+
</ins><span class="cx"> def makeConduit(self, store):
</span><span class="cx"> conduit = PoddingConduit(store)
</span><span class="cx"> conduit.conduitRequestClass = FakeConduitRequest
</span></span></pre></div>
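<p>FakeConduitRequest above bridges a streamed cross-pod response into a ProducerStream through a small inline Protocol. The sketch below shows the same Protocol-driven accumulation pattern in isolation, collecting bytes into a Deferred instead of a txweb2 ProducerStream so it can run without a reactor; AccumulatingProtocol is an illustrative name, not part of the CalendarServer API.</p>
<pre>
# Standalone sketch of the Protocol-to-buffer pattern used by the inline
# StreamProtocol in FakeConduitRequest._processRequest. Driven directly with
# a StringTransport, so no reactor is needed.

from twisted.internet.defer import Deferred
from twisted.internet.error import ConnectionDone
from twisted.internet.protocol import Protocol
from twisted.python.failure import Failure
from twisted.test.proto_helpers import StringTransport


class AccumulatingProtocol(Protocol):
    """Collect all received bytes and deliver them when the peer closes."""

    def __init__(self):
        self.buffer = []
        self.done = Deferred()

    def dataReceived(self, data):
        # Equivalent to StreamProtocol.dataReceived writing into the stream.
        self.buffer.append(data)

    def connectionLost(self, reason):
        # Equivalent to StreamProtocol.connectionLost finishing the stream.
        self.done.callback(b"".join(self.buffer))


if __name__ == "__main__":
    proto = AccumulatingProtocol()
    proto.makeConnection(StringTransport())
    results = []
    proto.done.addCallback(results.append)
    proto.dataReceived(b"attachment ")
    proto.dataReceived(b"bytes")
    proto.connectionLost(Failure(ConnectionDone()))
    print(results)
</pre>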
<a id="CalendarServertrunktxdavcommondatastorepoddingutilpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastorepoddingutilpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/podding/util.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/util.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/podding/util.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,265 @@
</span><ins>+##
+# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from txdav.common.datastore.podding.base import FailedCrossPodRequestError
+from txdav.common.datastore.sql_notification import NotificationCollection, \
+ NotificationObject
+
+
+class UtilityConduitMixin(object):
+ """
+ Defines utility methods for cross-pod API and mix-ins.
+ """
+
+ #
+ # Utility methods to map from store objects to/from JSON
+ #
+
+ @inlineCallbacks
+ def _getRequestForStoreObject(self, action, storeObject, classMethod):
+ """
+ Create the JSON data needed to identify the remote resource by type and ids, along with any parent resources.
+
+ @param action: the conduit action name
+ @type action: L{str}
+ @param storeObject: the store object that is being operated on
+ @type storeObject: L{object}
+ @param classMethod: indicates whether the method being called is a classmethod
+ @type classMethod: L{bool}
+
+ @return: the transaction in use, the JSON dict to send in the request,
+ the server where the request should be sent
+ @rtype: L{tuple} of (L{CommonStoreTransaction}, L{dict}, L{str})
+ """
+
+ from txdav.common.datastore.sql import CommonObjectResource, CommonHomeChild, CommonHome
+ result = {
+ "action": action,
+ }
+
+ # Extract the relevant store objects
+ txn = storeObject._txn
+ owner_home = None
+ viewer_home = None
+ home_child = None
+ object_resource = None
+ notification = None
+ if isinstance(storeObject, CommonObjectResource):
+ owner_home = storeObject.ownerHome()
+ viewer_home = storeObject.viewerHome()
+ home_child = storeObject.parentCollection()
+ object_resource = storeObject
+ elif isinstance(storeObject, CommonHomeChild):
+ owner_home = storeObject.ownerHome()
+ viewer_home = storeObject.viewerHome()
+ home_child = storeObject
+ result["classMethod"] = classMethod
+ elif isinstance(storeObject, CommonHome):
+ owner_home = storeObject
+ viewer_home = storeObject
+ txn = storeObject._txn
+ result["classMethod"] = classMethod
+ elif isinstance(storeObject, NotificationCollection):
+ notification = storeObject
+ txn = storeObject._txn
+ result["classMethod"] = classMethod
+
+ # Add store object identities to JSON request
+ if viewer_home:
+ result["homeType"] = viewer_home._homeType
+ result["homeUID"] = viewer_home.uid()
+ if getattr(viewer_home, "_migratingHome", False):
+ result["allowDisabledHome"] = True
+ if home_child:
+ if home_child.owned():
+ result["homeChildID"] = home_child.id()
+ else:
+ result["homeChildSharedID"] = home_child.name()
+ if object_resource:
+ result["objectResourceID"] = object_resource.id()
+
+ # Note that the owner_home is always the ownerHome() because in the sharing case
+ # a viewer is accessing the owner's data on another pod.
+ recipient = yield self.store.directoryService().recordWithUID(owner_home.uid())
+
+ elif notification:
+ result["notificationUID"] = notification.uid()
+ if getattr(notification, "_migratingHome", False):
+ result["allowDisabledHome"] = True
+ recipient = yield self.store.directoryService().recordWithUID(notification.uid())
+
+ returnValue((txn, result, recipient.server(),))
+
+
+ @inlineCallbacks
+ def _getStoreObjectForRequest(self, txn, request):
+ """
+ Resolve the supplied JSON data to get a store object to operate on.
+ """
+
+ returnObject = txn
+ classObject = None
+
+ if "allowDisabledHome" in request:
+ txn._allowDisabled = True
+
+ if "homeUID" in request:
+ home = yield txn.homeWithUID(request["homeType"], request["homeUID"])
+ if home is None:
+ raise FailedCrossPodRequestError("Invalid owner UID specified")
+ home._internalRequest = False
+ returnObject = home
+ if request.get("classMethod", False):
+ classObject = home._childClass
+
+ if "homeChildID" in request:
+ homeChild = yield home.childWithID(request["homeChildID"])
+ if homeChild is None:
+ raise FailedCrossPodRequestError("Invalid home child specified")
+ returnObject = homeChild
+ if request.get("classMethod", False):
+ classObject = homeChild._objectResourceClass
+ elif "homeChildSharedID" in request:
+ homeChild = yield home.childWithName(request["homeChildSharedID"])
+ if homeChild is None:
+ raise FailedCrossPodRequestError("Invalid home child specified")
+ returnObject = homeChild
+ if request.get("classMethod", False):
+ classObject = homeChild._objectResourceClass
+
+ if "objectResourceID" in request:
+ objectResource = yield homeChild.objectResourceWithID(request["objectResourceID"])
+ if objectResource is None:
+ raise FailedCrossPodRequestError("Invalid object resource specified")
+ returnObject = objectResource
+
+ if "notificationUID" in request:
+ notification = yield txn.notificationsWithUID(request["notificationUID"])
+ if notification is None:
+ raise FailedCrossPodRequestError("Invalid notification UID specified")
+ notification._internalRequest = False
+ returnObject = notification
+ if request.get("classMethod", False):
+ classObject = NotificationObject
+
+ returnValue((returnObject, classObject,))
+
+
+ #
+ # We can simplify code generation for simple calls by dynamically generating the appropriate class methods.
+ #
+
+ @inlineCallbacks
+ def _simple_object_send(self, actionName, storeObject, classMethod=False, transform=None, args=None, kwargs=None):
+ """
+ A simple send operation that returns a value.
+
+ @param actionName: name of the action.
+ @type actionName: C{str}
+ @param storeObject: the store object being operated on.
+ @type storeObject: L{object}
+ @param classMethod: indicates whether the method being called is a classmethod.
+ @type classMethod: L{bool}
+ @param transform: a function used to convert the JSON response into return values.
+ @type transform: C{callable}
+ @param args: list of optional arguments.
+ @type args: C{list}
+ @param kwargs: optional keyword arguments.
+ @type kwargs: C{dict}
+ """
+
+ txn, request, server = yield self._getRequestForStoreObject(actionName, storeObject, classMethod)
+ if args is not None:
+ request["arguments"] = args
+ if kwargs is not None:
+ request["keywords"] = kwargs
+ response = yield self.sendRequestToServer(txn, server, request)
+ returnValue(transform(response) if transform is not None else response)
+
+
+ @inlineCallbacks
+ def _simple_object_recv(self, txn, actionName, request, method, transform=None):
+ """
+ A simple recv operation that returns a value. We also look for an optional set of arguments/keywords
+ and include those only if present.
+
+ @param actionName: name of the action.
+ @type actionName: C{str}
+ @param request: request arguments
+ @type request: C{dict}
+ @param method: name of the method to execute on the shared resource to get the result.
+ @type method: C{str}
+ @param transform: method to call on returned JSON value to convert it to something useful.
+ @type transform: C{callable}
+ """
+
+ storeObject, classObject = yield self._getStoreObjectForRequest(txn, request)
+ if classObject is not None:
+ value = yield getattr(classObject, method)(storeObject, *request.get("arguments", ()), **request.get("keywords", {}))
+ else:
+ value = yield getattr(storeObject, method)(*request.get("arguments", ()), **request.get("keywords", {}))
+
+ returnValue(transform(value) if transform is not None else value)
+
+
+ #
+ # Factory methods for binding actions to the conduit class
+ #
+ @staticmethod
+ def _make_simple_action(bindcls, action, method, classMethod=False, transform_recv_result=None, transform_send_result=None):
+ setattr(
+ bindcls,
+ "send_{}".format(action),
+ lambda self, storeObject, *args, **kwargs:
+ self._simple_object_send(action, storeObject, classMethod=classMethod, transform=transform_send_result, args=args, kwargs=kwargs)
+ )
+ setattr(
+ bindcls,
+ "recv_{}".format(action),
+ lambda self, txn, message:
+ self._simple_object_recv(txn, action, message, method, transform=transform_recv_result)
+ )
+
+
+ #
+ # Transforms for returned data
+ #
+ @staticmethod
+ def _to_serialize(value):
+ """
+ Convert the value to the external (JSON-based) representation.
+ """
+ return value.serialize() if value is not None else None
+
+
+ @staticmethod
+ def _to_serialize_list(value):
+ """
+ Convert the value to the external (JSON-based) representation.
+ """
+ return [v.serialize() for v in value]
+
+
+ @staticmethod
+ def _to_string(value):
+ return str(value)
+
+
+ @staticmethod
+ def _to_tuple(value):
+ return tuple(value)
</ins></span></pre></div>
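<p>The serialization transforms at the end of UtilityConduitMixin (and the dict variants on StoreAPIConduitMixin above) convert store records into JSON-compatible structures before they cross pods. A minimal sketch of that behavior, assuming a stand-in Record class with a serialize() method that is not part of the CalendarServer API:</p>
<pre>
# Illustrative sketch of the serialization transforms used by the conduit.
# Record is a stand-in for store records that expose serialize().

import json


class Record(object):
    """Stand-in for a store record that knows how to serialize itself."""
    def __init__(self, **fields):
        self.fields = fields

    def serialize(self):
        # Real store records return a plain dict of column values.
        return dict(self.fields)


def to_serialize_list(value):
    # Mirrors UtilityConduitMixin._to_serialize_list.
    return [v.serialize() for v in value]


def to_serialize_dict_value(value):
    # Mirrors StoreAPIConduitMixin._to_serialize_dict_value.
    return dict([(k, v.serialize(),) for k, v in value.items()])


if __name__ == "__main__":
    records = [Record(name="calendar", id=1), Record(name="tasks", id=2)]
    print(json.dumps(to_serialize_list(records)))
    print(json.dumps(to_serialize_dict_value({"user01": Record(mode="own")})))
</pre>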
<a id="CalendarServertrunktxdavcommondatastoresqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -31,7 +31,7 @@
</span><span class="cx">
</span><span class="cx"> from twext.enterprise.dal.syntax import (
</span><span class="cx"> Delete, utcNowSQL, Union, Insert, Len, Max, Parameter, SavepointAction,
</span><del>- Select, Update, ColumnSyntax, TableSyntax, Upper, Count, ALL_COLUMNS, Sum,
</del><ins>+ Select, Update, Count, ALL_COLUMNS, Sum,
</ins><span class="cx"> DatabaseLock, DatabaseUnlock)
</span><span class="cx"> from twext.enterprise.ienterprise import AlreadyFinishedError
</span><span class="cx"> from twext.enterprise.jobqueue import LocalQueuer
</span><span class="lines">@@ -39,12 +39,10 @@
</span><span class="cx"> from twext.internet.decorate import memoizedKey, Memoizable
</span><span class="cx"> from twext.python.clsprop import classproperty
</span><span class="cx"> from twext.python.log import Logger
</span><del>-from txweb2.http_headers import MimeType
</del><span class="cx">
</span><span class="cx"> from twisted.application.service import Service
</span><span class="cx"> from twisted.internet import reactor
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue, succeed
</span><del>-from twisted.python import hashlib
</del><span class="cx"> from twisted.python.failure import Failure
</span><span class="cx"> from twisted.python.modules import getModule
</span><span class="cx"> from twisted.python.util import FancyEqMixin
</span><span class="lines">@@ -53,45 +51,40 @@
</span><span class="cx"> from twistedcaldav.dateops import datetimeMktime, pyCalendarTodatetime
</span><span class="cx">
</span><span class="cx"> from txdav.base.datastore.util import QueryCacher
</span><del>-from txdav.base.datastore.util import normalizeUUIDOrNot
-from txdav.base.propertystore.base import PropertyName
</del><span class="cx"> from txdav.base.propertystore.none import PropertyStore as NonePropertyStore
</span><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> from txdav.caldav.icalendarstore import ICalendarTransaction, ICalendarStore
</span><span class="cx"> from txdav.carddav.iaddressbookstore import IAddressBookTransaction
</span><span class="cx"> from txdav.common.datastore.common import HomeChildBase
</span><span class="cx"> from txdav.common.datastore.podding.conduit import PoddingConduit
</span><del>-from txdav.common.datastore.sql_tables import _BIND_MODE_DIRECT, \
- _BIND_MODE_INDIRECT, _BIND_MODE_OWN, _BIND_STATUS_ACCEPTED, \
- _BIND_STATUS_DECLINED, _BIND_STATUS_DELETED, _BIND_STATUS_INVALID, \
- _BIND_STATUS_INVITED, _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL, \
- _HOME_STATUS_PURGING, schema, splitSQLString
</del><ins>+from txdav.common.datastore.sql_apn import APNSubscriptionsMixin
+from txdav.common.datastore.sql_directory import DelegatesAPIMixin, \
+ GroupsAPIMixin, GroupCacherAPIMixin
+from txdav.common.datastore.sql_imip import imipAPIMixin
+from txdav.common.datastore.sql_notification import NotificationCollection
+from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, _BIND_STATUS_ACCEPTED, \
+ _HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL, \
+ _HOME_STATUS_PURGING, schema, splitSQLString, _HOME_STATUS_MIGRATING, \
+ _HOME_STATUS_DISABLED
+from txdav.common.datastore.sql_util import _SharedSyncLogic
+from txdav.common.datastore.sql_sharing import SharingHomeMixIn, SharingMixIn
</ins><span class="cx"> from txdav.common.icommondatastore import ConcurrentModification, \
</span><del>- RecordNotAllowedError, ExternalShareFailed, ShareNotAllowed, \
- IndexedSearchException, NotFoundError
</del><ins>+ RecordNotAllowedError, ShareNotAllowed, \
+ IndexedSearchException, EADDRESSBOOKTYPE, ECALENDARTYPE
</ins><span class="cx"> from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
</span><span class="cx"> HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
</span><span class="cx"> ObjectResourceNameNotAllowedError, ObjectResourceNameAlreadyExistsError, \
</span><del>- NoSuchObjectResourceError, AllRetriesFailed, InvalidSubscriptionValues, \
- InvalidIMIPTokenValues, TooManyObjectResourcesError, \
- SyncTokenValidException
</del><ins>+ NoSuchObjectResourceError, AllRetriesFailed, \
+ TooManyObjectResourcesError, SyncTokenValidException
</ins><span class="cx"> from txdav.common.idirectoryservice import IStoreDirectoryService, \
</span><span class="cx"> DirectoryRecordNotFoundError
</span><del>-from txdav.common.inotifications import INotificationCollection, \
- INotificationObject
</del><span class="cx"> from txdav.idav import ChangeCategory
</span><del>-from txdav.who.delegates import Delegates
-from txdav.xml import element
</del><span class="cx">
</span><del>-from uuid import uuid4, UUID
-
</del><span class="cx"> from zope.interface import implements, directlyProvides
</span><span class="cx">
</span><del>-from collections import namedtuple
-import datetime
</del><ins>+import collections
</ins><span class="cx"> import inspect
</span><span class="cx"> import itertools
</span><del>-import json
</del><span class="cx"> import sys
</span><span class="cx"> import time
</span><span class="cx">
</span><span class="lines">@@ -99,18 +92,6 @@
</span><span class="cx">
</span><span class="cx"> log = Logger()
</span><span class="cx">
</span><del>-ECALENDARTYPE = 0
-EADDRESSBOOKTYPE = 1
-ENOTIFICATIONTYPE = 2
-
-# Labels used to identify the class of resource being modified, so that
-# notification systems can target the correct application
-NotifierPrefixes = {
- ECALENDARTYPE: "CalDAV",
- EADDRESSBOOKTYPE: "CardDAV",
-}
-
-
</del><span class="cx"> class CommonDataStore(Service, object):
</span><span class="cx"> """
</span><span class="cx"> Shared logic for SQL-based data stores, between calendar and addressbook
</span><span class="lines">@@ -565,7 +546,10 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><del>-class CommonStoreTransaction(object):
</del><ins>+class CommonStoreTransaction(
+ GroupsAPIMixin, GroupCacherAPIMixin, DelegatesAPIMixin,
+ imipAPIMixin, APNSubscriptionsMixin,
+):
</ins><span class="cx"> """
</span><span class="cx"> Transaction implementation for SQL database.
</span><span class="cx"> """
</span><span class="lines">@@ -585,14 +569,26 @@
</span><span class="cx">
</span><span class="cx"> self._store = store
</span><span class="cx"> self._queuer = self._store.queuer
</span><del>- self._calendarHomes = {}
- self._addressbookHomes = {}
- self._notificationHomes = {}
</del><ins>+ self._cachedHomes = {
+ ECALENDARTYPE: {
+ "byUID": collections.defaultdict(dict),
+ "byID": collections.defaultdict(dict),
+ },
+ EADDRESSBOOKTYPE: {
+ "byUID": collections.defaultdict(dict),
+ "byID": collections.defaultdict(dict),
+ },
+ }
+ self._notificationHomes = {
+ "byUID": collections.defaultdict(dict),
+ "byID": collections.defaultdict(dict),
+ }
</ins><span class="cx"> self._notifierFactories = notifierFactories
</span><span class="cx"> self._notifiedAlready = set()
</span><span class="cx"> self._bumpedRevisionAlready = set()
</span><span class="cx"> self._label = label
</span><span class="cx"> self._migrating = migrating
</span><ins>+ self._allowDisabled = False
</ins><span class="cx"> self._primaryHomeType = None
</span><span class="cx"> self._disableCache = disableCache or not store.queryCachingEnabled()
</span><span class="cx"> if disableCache:
</span><span class="lines">@@ -695,14 +691,11 @@
</span><span class="cx"> ).on(self)
</span><span class="cx">
</span><span class="cx">
</span><del>- def _determineMemo(self, storeType, uid, create=False, authzUID=None):
</del><ins>+ def _determineMemo(self, storeType, lookupMode, status):
</ins><span class="cx"> """
</span><span class="cx"> Determine the memo dictionary to use for homeWithUID.
</span><span class="cx"> """
</span><del>- if storeType == ECALENDARTYPE:
- return self._calendarHomes
- else:
- return self._addressbookHomes
</del><ins>+ return self._cachedHomes[storeType][lookupMode][status]
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -717,11 +710,11 @@
</span><span class="cx"> yield self.homeWithUID(storeType, uid, create=False)
</span><span class="cx">
</span><span class="cx"> # Return the memoized list directly
</span><del>- returnValue([kv[1] for kv in sorted(self._determineMemo(storeType, None).items(), key=lambda x: x[0])])
</del><ins>+ returnValue([kv[1] for kv in sorted(self._determineMemo(storeType, "byUID", _HOME_STATUS_NORMAL).items(), key=lambda x: x[0])])
</ins><span class="cx">
</span><span class="cx">
</span><del>- @memoizedKey("uid", _determineMemo)
- def homeWithUID(self, storeType, uid, create=False, authzUID=None):
</del><ins>+ @inlineCallbacks
+ def homeWithUID(self, storeType, uid, status=None, create=False, authzUID=None):
</ins><span class="cx"> """
</span><span class="cx"> We need to distinguish between various different users "looking" at a home and its
</span><span class="cx"> child resources because we have per-user properties that depend on which user is "looking".
</span><span class="lines">@@ -733,15 +726,21 @@
</span><span class="cx"> if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
</span><span class="cx"> raise RuntimeError("Unknown home type.")
</span><span class="cx">
</span><del>- return self._homeClass[storeType].homeWithUID(self, uid, create, authzUID)
</del><ins>+ result = self._determineMemo(storeType, "byUID", status).get(uid)
+ if result is None:
+ result = yield self._homeClass[storeType].homeWithUID(self, uid, status, create, authzUID)
+ if result:
+ self._determineMemo(storeType, "byUID", status)[uid] = result
+ self._determineMemo(storeType, "byID", None)[result.id()] = result
+ returnValue(result)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def calendarHomeWithUID(self, uid, create=False, authzUID=None):
- return self.homeWithUID(ECALENDARTYPE, uid, create=create, authzUID=authzUID)
</del><ins>+ def calendarHomeWithUID(self, uid, status=None, create=False, authzUID=None):
+ return self.homeWithUID(ECALENDARTYPE, uid, status=status, create=create, authzUID=authzUID)
</ins><span class="cx">
</span><span class="cx">
</span><del>- def addressbookHomeWithUID(self, uid, create=False, authzUID=None):
- return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create, authzUID=authzUID)
</del><ins>+ def addressbookHomeWithUID(self, uid, status=None, create=False, authzUID=None):
+ return self.homeWithUID(EADDRESSBOOKTYPE, uid, status=status, create=create, authzUID=authzUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -749,12 +748,15 @@
</span><span class="cx"> """
</span><span class="cx"> Load a calendar or addressbook home by its integer resource ID.
</span><span class="cx"> """
</span><del>- uid = (yield self._homeClass[storeType].homeUIDWithResourceID(self, rid))
- if uid:
- # Always get the owner's view of the home = i.e., authzUID=uid
- result = (yield self.homeWithUID(storeType, uid, authzUID=uid))
- else:
- result = None
</del><ins>+ if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
+ raise RuntimeError("Unknown home type.")
+
+ result = self._determineMemo(storeType, "byID", None).get(rid)
+ if result is None:
+ result = yield self._homeClass[storeType].homeWithResourceID(self, rid)
+ if result:
+ self._determineMemo(storeType, "byID", None)[rid] = result
+ self._determineMemo(storeType, "byUID", result.status())[result.uid()] = result
</ins><span class="cx"> returnValue(result)
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -766,1303 +768,36 @@
</span><span class="cx"> return self.homeWithResourceID(EADDRESSBOOKTYPE, rid)
</span><span class="cx">
</span><span class="cx">
</span><del>- @memoizedKey("uid", "_notificationHomes")
- def notificationsWithUID(self, uid, create=True):
</del><ins>+ @inlineCallbacks
+ def notificationsWithUID(self, uid, status=None, create=False):
</ins><span class="cx"> """
</span><span class="cx"> Implement notificationsWithUID.
</span><span class="cx"> """
</span><del>- return NotificationCollection.notificationsWithUID(self, uid, create)
</del><span class="cx">
</span><ins>+ result = self._notificationHomes["byUID"][status].get(uid)
+ if result is None:
+ result = yield NotificationCollection.notificationsWithUID(self, uid, status=status, create=create)
+ if result:
+ self._notificationHomes["byUID"][status][uid] = result
+ self._notificationHomes["byID"][None][result.id()] = result
+ returnValue(result)
</ins><span class="cx">
</span><del>- @memoizedKey("rid", "_notificationHomes")
</del><ins>+
+ @inlineCallbacks
</ins><span class="cx"> def notificationsWithResourceID(self, rid):
</span><span class="cx"> """
</span><span class="cx"> Implement notificationsWithResourceID.
</span><span class="cx"> """
</span><del>- return NotificationCollection.notificationsWithResourceID(self, rid)
</del><span class="cx">
</span><ins>+ result = self._notificationHomes["byID"][None].get(rid)
+ if result is None:
+ result = yield NotificationCollection.notificationsWithResourceID(self, rid)
+ if result:
+ self._notificationHomes["byID"][None][rid] = result
+ self._notificationHomes["byUID"][result.status()][result.uid()] = result
+ returnValue(result)
</ins><span class="cx">
</span><del>- @classproperty
- def _insertAPNSubscriptionQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Insert({
- apn.TOKEN: Parameter("token"),
- apn.RESOURCE_KEY: Parameter("resourceKey"),
- apn.MODIFIED: Parameter("modified"),
- apn.SUBSCRIBER_GUID: Parameter("subscriber"),
- apn.USER_AGENT: Parameter("userAgent"),
- apn.IP_ADDR: Parameter("ipAddr")
- })
</del><span class="cx">
</span><del>-
- @classproperty
- def _updateAPNSubscriptionQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Update(
- {
- apn.MODIFIED: Parameter("modified"),
- apn.SUBSCRIBER_GUID: Parameter("subscriber"),
- apn.USER_AGENT: Parameter("userAgent"),
- apn.IP_ADDR: Parameter("ipAddr")
- },
- Where=(apn.TOKEN == Parameter("token")).And(
- apn.RESOURCE_KEY == Parameter("resourceKey"))
- )
-
-
- @classproperty
- def _selectAPNSubscriptionQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Select(
- [apn.MODIFIED, apn.SUBSCRIBER_GUID],
- From=apn,
- Where=(apn.TOKEN == Parameter("token")).And(
- apn.RESOURCE_KEY == Parameter("resourceKey")
- )
- )
-
-
- @inlineCallbacks
- def addAPNSubscription(
- self, token, key, timestamp, subscriber,
- userAgent, ipAddr
- ):
- if not (token and key and timestamp and subscriber):
- raise InvalidSubscriptionValues()
-
- # Cap these values at 255 characters
- userAgent = userAgent[:255]
- ipAddr = ipAddr[:255]
-
- row = yield self._selectAPNSubscriptionQuery.on(
- self,
- token=token, resourceKey=key
- )
- if not row: # Subscription does not yet exist
- try:
- yield self._insertAPNSubscriptionQuery.on(
- self,
- token=token, resourceKey=key, modified=timestamp,
- subscriber=subscriber, userAgent=userAgent,
- ipAddr=ipAddr)
- except Exception:
- # Subscription may have been added by someone else, which is fine
- pass
-
- else: # Subscription exists, so update with new timestamp and subscriber
- try:
- yield self._updateAPNSubscriptionQuery.on(
- self,
- token=token, resourceKey=key, modified=timestamp,
- subscriber=subscriber, userAgent=userAgent,
- ipAddr=ipAddr)
- except Exception:
- # Subscription may have been added by someone else, which is fine
- pass
-
-
- @classproperty
- def _removeAPNSubscriptionQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Delete(From=apn,
- Where=(apn.TOKEN == Parameter("token")).And(
- apn.RESOURCE_KEY == Parameter("resourceKey")))
-
-
- def removeAPNSubscription(self, token, key):
- return self._removeAPNSubscriptionQuery.on(
- self,
- token=token, resourceKey=key)
-
-
- @classproperty
- def _purgeOldAPNSubscriptionQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Delete(From=apn,
- Where=(apn.MODIFIED < Parameter("olderThan")))
-
-
- def purgeOldAPNSubscriptions(self, olderThan):
- return self._purgeOldAPNSubscriptionQuery.on(
- self,
- olderThan=olderThan)
-
-
- @classproperty
- def _apnSubscriptionsByTokenQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Select([apn.RESOURCE_KEY, apn.MODIFIED, apn.SUBSCRIBER_GUID],
- From=apn, Where=apn.TOKEN == Parameter("token"))
-
-
- def apnSubscriptionsByToken(self, token):
- return self._apnSubscriptionsByTokenQuery.on(self, token=token)
-
-
- @classproperty
- def _apnSubscriptionsByKeyQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Select([apn.TOKEN, apn.SUBSCRIBER_GUID],
- From=apn, Where=apn.RESOURCE_KEY == Parameter("resourceKey"))
-
-
- def apnSubscriptionsByKey(self, key):
- return self._apnSubscriptionsByKeyQuery.on(self, resourceKey=key)
-
-
- @classproperty
- def _apnSubscriptionsBySubscriberQuery(cls):
- apn = schema.APN_SUBSCRIPTIONS
- return Select([apn.TOKEN, apn.RESOURCE_KEY, apn.MODIFIED, apn.USER_AGENT, apn.IP_ADDR],
- From=apn, Where=apn.SUBSCRIBER_GUID == Parameter("subscriberGUID"))
-
-
- def apnSubscriptionsBySubscriber(self, guid):
- return self._apnSubscriptionsBySubscriberQuery.on(self, subscriberGUID=guid)
-
-
- # Create IMIP token
-
- @classproperty
- def _insertIMIPTokenQuery(cls):
- imip = schema.IMIP_TOKENS
- return Insert({
- imip.TOKEN: Parameter("token"),
- imip.ORGANIZER: Parameter("organizer"),
- imip.ATTENDEE: Parameter("attendee"),
- imip.ICALUID: Parameter("icaluid"),
- })
-
-
- @inlineCallbacks
- def imipCreateToken(self, organizer, attendee, icaluid, token=None):
- if not (organizer and attendee and icaluid):
- raise InvalidIMIPTokenValues()
-
- if token is None:
- token = str(uuid4())
-
- try:
- yield self._insertIMIPTokenQuery.on(
- self,
- token=token, organizer=organizer, attendee=attendee,
- icaluid=icaluid)
- except Exception:
- # TODO: is it okay if someone else created the same row just now?
- pass
- returnValue(token)
-
- # Lookup IMIP organizer+attendee+icaluid for token
-
-
- @classproperty
- def _selectIMIPTokenByTokenQuery(cls):
- imip = schema.IMIP_TOKENS
- return Select([imip.ORGANIZER, imip.ATTENDEE, imip.ICALUID], From=imip,
- Where=(imip.TOKEN == Parameter("token")))
-
-
- def imipLookupByToken(self, token):
- return self._selectIMIPTokenByTokenQuery.on(self, token=token)
-
- # Lookup IMIP token for organizer+attendee+icaluid
-
-
- @classproperty
- def _selectIMIPTokenQuery(cls):
- imip = schema.IMIP_TOKENS
- return Select(
- [imip.TOKEN],
- From=imip,
- Where=(imip.ORGANIZER == Parameter("organizer")).And(
- imip.ATTENDEE == Parameter("attendee")).And(
- imip.ICALUID == Parameter("icaluid"))
- )
-
-
- @classproperty
- def _updateIMIPTokenQuery(cls):
- imip = schema.IMIP_TOKENS
- return Update(
- {imip.ACCESSED: utcNowSQL, },
- Where=(imip.ORGANIZER == Parameter("organizer")).And(
- imip.ATTENDEE == Parameter("attendee")).And(
- imip.ICALUID == Parameter("icaluid"))
- )
-
-
- @inlineCallbacks
- def imipGetToken(self, organizer, attendee, icaluid):
- row = (yield self._selectIMIPTokenQuery.on(
- self, organizer=organizer,
- attendee=attendee, icaluid=icaluid))
- if row:
- token = row[0][0]
- # update the timestamp
- yield self._updateIMIPTokenQuery.on(
- self, organizer=organizer,
- attendee=attendee, icaluid=icaluid)
- else:
- token = None
- returnValue(token)
-
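# Illustrative sketch (assumed names): the create-or-reuse flow for IMIP
# tokens. "txn" is assumed to be an open store transaction exposing the IMIP
# methods above; organizer/attendee/icaluid values come from the caller.
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def tokenForInvite(txn, organizer, attendee, icaluid):
    token = yield txn.imipGetToken(organizer, attendee, icaluid)
    if token is None:
        # No existing mapping for this triple: mint a new random token.
        token = yield txn.imipCreateToken(organizer, attendee, icaluid)
    returnValue(token)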
-
- # Remove IMIP token
- @classproperty
- def _removeIMIPTokenQuery(cls):
- imip = schema.IMIP_TOKENS
- return Delete(From=imip,
- Where=(imip.TOKEN == Parameter("token")))
-
-
- def imipRemoveToken(self, token):
- return self._removeIMIPTokenQuery.on(self, token=token)
-
-
- # Purge old IMIP tokens
- @classproperty
- def _purgeOldIMIPTokensQuery(cls):
- imip = schema.IMIP_TOKENS
- return Delete(From=imip,
- Where=(imip.ACCESSED < Parameter("olderThan")))
-
-
- def purgeOldIMIPTokens(self, olderThan):
- """
- @type olderThan: datetime
- """
- return self._purgeOldIMIPTokensQuery.on(self, olderThan=olderThan)
-
- # End of IMIP
-
-
- # Groups
-
- @classproperty
- def _addGroupQuery(cls):
- gr = schema.GROUPS
- return Insert(
- {
- gr.NAME: Parameter("name"),
- gr.GROUP_UID: Parameter("groupUID"),
- gr.MEMBERSHIP_HASH: Parameter("membershipHash")
- },
- Return=gr.GROUP_ID
- )
-
-
- @classproperty
- def _updateGroupQuery(cls):
- gr = schema.GROUPS
- return Update(
- {
- gr.MEMBERSHIP_HASH: Parameter("membershipHash"),
- gr.NAME: Parameter("name"),
- gr.MODIFIED: Parameter("timestamp"),
- gr.EXTANT: Parameter("extant"),
- },
- Where=(gr.GROUP_UID == Parameter("groupUID"))
- )
-
-
- @classproperty
- def _groupByUID(cls):
- gr = schema.GROUPS
- return Select(
- [gr.GROUP_ID, gr.NAME, gr.MEMBERSHIP_HASH, gr.MODIFIED, gr.EXTANT],
- From=gr,
- Where=(gr.GROUP_UID == Parameter("groupUID"))
- )
-
-
- @classproperty
- def _groupByID(cls):
- gr = schema.GROUPS
- return Select(
- [gr.GROUP_UID, gr.NAME, gr.MEMBERSHIP_HASH, gr.EXTANT],
- From=gr,
- Where=(gr.GROUP_ID == Parameter("groupID"))
- )
-
-
- @classproperty
- def _deleteGroup(cls):
- gr = schema.GROUPS
- return Delete(
- From=gr,
- Where=(gr.GROUP_ID == Parameter("groupID"))
- )
-
-
- @inlineCallbacks
- def addGroup(self, groupUID, name, membershipHash):
- """
- @type groupUID: C{unicode}
- @type name: C{unicode}
- @type membershipHash: C{str}
- """
- record = yield self.directoryService().recordWithUID(groupUID)
- if record is None:
- returnValue(None)
-
- groupID = (yield self._addGroupQuery.on(
- self,
- name=name.encode("utf-8"),
- groupUID=groupUID.encode("utf-8"),
- membershipHash=membershipHash
- ))[0][0]
-
- yield self.refreshGroup(
- groupUID, record, groupID, name.encode("utf-8"), membershipHash, True
- )
- returnValue(groupID)
-
-
- def updateGroup(self, groupUID, name, membershipHash, extant=True):
- """
- @type groupUID: C{unicode}
- @type name: C{unicode}
- @type membershipHash: C{str}
- @type extant: C{boolean}
- """
- timestamp = datetime.datetime.utcnow()
- return self._updateGroupQuery.on(
- self,
- name=name.encode("utf-8"),
- groupUID=groupUID.encode("utf-8"),
- timestamp=timestamp,
- membershipHash=membershipHash,
- extant=(1 if extant else 0)
- )
-
-
- @inlineCallbacks
- def groupByUID(self, groupUID, create=True):
- """
- Return or create a record for the group UID.
-
- @type groupUID: C{unicode}
-
- @return: Deferred firing with tuple of group ID C{str}, group name
- C{unicode}, membership hash C{str}, modified timestamp, and
- extant C{boolean}
- """
- results = (
- yield self._groupByUID.on(
- self, groupUID=groupUID.encode("utf-8")
- )
- )
- if results:
- returnValue((
- results[0][0], # group id
- results[0][1].decode("utf-8"), # name
- results[0][2], # membership hash
- results[0][3], # modified timestamp
- bool(results[0][4]), # extant
- ))
- elif create:
- savepoint = SavepointAction("groupByUID")
- yield savepoint.acquire(self)
- try:
- groupID = yield self.addGroup(groupUID, u"", "")
- if groupID is None:
- # The record does not actually exist within the directory
- yield savepoint.release(self)
- returnValue((None, None, None, None, None))
-
- except Exception:
- yield savepoint.rollback(self)
- results = (
- yield self._groupByUID.on(
- self, groupUID=groupUID.encode("utf-8")
- )
- )
- if results:
- returnValue((
- results[0][0], # group id
- results[0][1].decode("utf-8"), # name
- results[0][2], # membership hash
- results[0][3], # modified timestamp
- bool(results[0][4]), # extant
- ))
- else:
- returnValue((None, None, None, None, None))
- else:
- yield savepoint.release(self)
- results = (
- yield self._groupByUID.on(
- self, groupUID=groupUID.encode("utf-8")
- )
- )
- if results:
- returnValue((
- results[0][0], # group id
- results[0][1].decode("utf-8"), # name
- results[0][2], # membership hash
- results[0][3], # modified timestamp
- bool(results[0][4]), # extant
- ))
- else:
- returnValue((None, None, None, None, None))
- else:
- returnValue((None, None, None, None, None))
-
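# Illustrative sketch (assumed names): reading the cached group record and
# creating it on first access. "txn" is assumed to be an open store
# transaction providing groupByUID() as defined above.
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def describeGroup(txn, groupUID):
    groupID, name, membershipHash, modified, extant = yield txn.groupByUID(
        groupUID, create=True
    )
    if groupID is None:
        # No directory record exists for this UID, so nothing was created.
        returnValue(None)
    returnValue({"id": groupID, "name": name, "hash": membershipHash, "extant": extant})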
-
- @inlineCallbacks
- def groupByID(self, groupID):
- """
- Given a group ID, return the group UID, or raise NotFoundError
-
- @type groupID: C{str}
- @return: Deferred firing with a tuple of group UID C{unicode},
- group name C{unicode}, membership hash C{str}, and extant C{boolean}
- """
- try:
- results = (yield self._groupByID.on(self, groupID=groupID))[0]
- if results:
- results = (
- results[0].decode("utf-8"),
- results[1].decode("utf-8"),
- results[2],
- bool(results[3])
- )
- returnValue(results)
- except IndexError:
- raise NotFoundError
-
-
- def deleteGroup(self, groupID):
- return self._deleteGroup.on(self, groupID=groupID)
-
- # End of Groups
-
-
- # Group Members
-
- @classproperty
- def _addMemberToGroupQuery(cls):
- gm = schema.GROUP_MEMBERSHIP
- return Insert(
- {
- gm.GROUP_ID: Parameter("groupID"),
- gm.MEMBER_UID: Parameter("memberUID")
- }
- )
-
-
- @classproperty
- def _removeMemberFromGroupQuery(cls):
- gm = schema.GROUP_MEMBERSHIP
- return Delete(
- From=gm,
- Where=(
- gm.GROUP_ID == Parameter("groupID")
- ).And(
- gm.MEMBER_UID == Parameter("memberUID")
- )
- )
-
-
- @classproperty
- def _selectGroupMembersQuery(cls):
- gm = schema.GROUP_MEMBERSHIP
- return Select(
- [gm.MEMBER_UID],
- From=gm,
- Where=(
- gm.GROUP_ID == Parameter("groupID")
- )
- )
-
-
- @classproperty
- def _selectGroupsForQuery(cls):
- gr = schema.GROUPS
- gm = schema.GROUP_MEMBERSHIP
-
- return Select(
- [gr.GROUP_UID],
- From=gr,
- Where=(
- gr.GROUP_ID.In(
- Select(
- [gm.GROUP_ID],
- From=gm,
- Where=(
- gm.MEMBER_UID == Parameter("uid")
- )
- )
- )
- )
- )
-
-
- def addMemberToGroup(self, memberUID, groupID):
- return self._addMemberToGroupQuery.on(
- self, groupID=groupID, memberUID=memberUID.encode("utf-8")
- )
-
-
- def removeMemberFromGroup(self, memberUID, groupID):
- return self._removeMemberFromGroupQuery.on(
- self, groupID=groupID, memberUID=memberUID.encode("utf-8")
- )
-
-
- @inlineCallbacks
- def groupMemberUIDs(self, groupID):
- """
- Returns the cached set of UIDs for members of the given groupID.
- Sub-groups are not returned in the results but their members are,
- because the group membership has already been expanded/flattened
- before storing in the db.
-
- @param groupID: the group ID
- @type groupID: C{int}
- @return: the set of member UIDs
- @rtype: a Deferred which fires with a set() of C{str} UIDs
- """
- members = set()
- results = (yield self._selectGroupMembersQuery.on(self, groupID=groupID))
- for row in results:
- members.add(row[0].decode("utf-8"))
- returnValue(members)
-
-
- @inlineCallbacks
- def refreshGroup(self, groupUID, record, groupID, cachedName, cachedMembershipHash, cachedExtant):
- """
- @param groupUID: the group UID
- @type groupUID: C{unicode}
- @param record: the directory record
- @type record: C{iDirectoryRecord}
- @param groupID: group resource id
- @type groupID: C{str}
- @param cachedName: group name in the database
- @type cachedName: C{unicode}
- @param cachedMembershipHash: membership hash in the database
- @type cachedMembershipHash: C{str}
- @param cachedExtant: extant field in the database
- @type cachedExtant: C{bool}
-
- @return: Deferred firing with a tuple of membershipChanged C{boolean}, added member UIDs, and removed member UIDs
-
- """
- if record is not None:
- memberUIDs = yield record.expandedMemberUIDs()
- name = record.displayName
- extant = True
- else:
- memberUIDs = frozenset()
- name = cachedName
- extant = False
-
- membershipHashContent = hashlib.md5()
- for memberUID in sorted(memberUIDs):
- membershipHashContent.update(str(memberUID))
- membershipHash = membershipHashContent.hexdigest()
-
- if cachedMembershipHash != membershipHash:
- membershipChanged = True
- log.debug(
- "Group '{group}' changed", group=name
- )
- else:
- membershipChanged = False
-
- if membershipChanged or extant != cachedExtant:
- # also updates group mod date
- yield self.updateGroup(
- groupUID, name, membershipHash, extant=extant
- )
-
- if membershipChanged:
- addedUIDs, removedUIDs = yield self.synchronizeMembers(groupID, set(memberUIDs))
- else:
- addedUIDs = removedUIDs = None
-
- returnValue((membershipChanged, addedUIDs, removedUIDs,))
-
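# Minimal standalone sketch of the membership-hash computation used above:
# an MD5 digest over the sorted member UIDs, so any change in membership
# (independent of ordering) yields a different hash.
import hashlib

def membershipHash(memberUIDs):
    h = hashlib.md5()
    for memberUID in sorted(memberUIDs):
        h.update(str(memberUID))
    return h.hexdigest()

# Example: the hash changes once membership changes.
before = membershipHash([u"user01", u"user02"])
after = membershipHash([u"user01", u"user02", u"user03"])
assert before != after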
-
- @inlineCallbacks
- def synchronizeMembers(self, groupID, newMemberUIDs):
- """
- Update the group membership table in the database to match the new membership list. This
- method will diff the existing set with the new set and apply the changes. It also calls out
- to a groupChanged() method with the set of added and removed members so that other modules
- that depend on groups can monitor the changes.
-
- @param groupID: group id of group to update
- @type groupID: L{str}
- @param newMemberUIDs: set of new member UIDs in the group
- @type newMemberUIDs: L{set} of L{str}
- """
- cachedMemberUIDs = (yield self.groupMemberUIDs(groupID))
-
- removed = cachedMemberUIDs - newMemberUIDs
- for memberUID in removed:
- yield self.removeMemberFromGroup(memberUID, groupID)
-
- added = newMemberUIDs - cachedMemberUIDs
- for memberUID in added:
- yield self.addMemberToGroup(memberUID, groupID)
-
- yield self.groupChanged(groupID, added, removed)
-
- returnValue((added, removed,))
-
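# Standalone sketch of the set arithmetic synchronizeMembers() relies on:
# members present only in the cache are removed, members present only in the
# new list are added, and the intersection is left untouched.
cached = {u"user01", u"user02", u"user03"}
new = {u"user02", u"user03", u"user04"}

removed = cached - new     # {u"user01"}  -> removeMemberFromGroup()
added = new - cached       # {u"user04"}  -> addMemberToGroup()
assert (cached - removed) | added == new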
-
- @inlineCallbacks
- def groupChanged(self, groupID, addedUIDs, removedUIDs):
- """
- Called when membership of a group changes.
-
- @param groupID: group id of group that changed
- @type groupID: L{str}
- @param addedUIDs: set of new member UIDs added to the group
- @type addedUIDs: L{set} of L{str}
- @param removedUIDs: set of old member UIDs removed from the group
- @type removedUIDs: L{set} of L{str}
- """
- yield Delegates.groupChanged(self, groupID, addedUIDs, removedUIDs)
-
-
- @inlineCallbacks
- def groupMembers(self, groupID):
- """
- The members of the given group as recorded in the db
- """
- members = set()
- memberUIDs = (yield self.groupMemberUIDs(groupID))
- for uid in memberUIDs:
- record = (yield self.directoryService().recordWithUID(uid))
- if record is not None:
- members.add(record)
- returnValue(members)
-
-
- @inlineCallbacks
- def groupUIDsFor(self, uid):
- """
- Returns the cached set of UIDs for the groups the given uid is
- a member of.
-
- @param uid: the uid
- @type uid: C{unicode}
- @return: the set of group UIDs
- @rtype: a Deferred which fires with a set() of C{unicode} group UIDs
- """
- groups = set()
- results = (
- yield self._selectGroupsForQuery.on(
- self, uid=uid.encode("utf-8")
- )
- )
- for row in results:
- groups.add(row[0].decode("utf-8"))
- returnValue(groups)
-
- # End of Group Members
-
- # Delegates
-
-
- @classproperty
- def _addDelegateQuery(cls):
- de = schema.DELEGATES
- return Insert({de.DELEGATOR: Parameter("delegator"),
- de.DELEGATE: Parameter("delegate"),
- de.READ_WRITE: Parameter("readWrite"),
- })
-
-
- @classproperty
- def _addDelegateGroupQuery(cls):
- ds = schema.DELEGATE_GROUPS
- return Insert({ds.DELEGATOR: Parameter("delegator"),
- ds.GROUP_ID: Parameter("groupID"),
- ds.READ_WRITE: Parameter("readWrite"),
- ds.IS_EXTERNAL: Parameter("isExternal"),
- })
-
-
- @classproperty
- def _removeDelegateQuery(cls):
- de = schema.DELEGATES
- return Delete(
- From=de,
- Where=(
- de.DELEGATOR == Parameter("delegator")
- ).And(
- de.DELEGATE == Parameter("delegate")
- ).And(
- de.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _removeDelegatesQuery(cls):
- de = schema.DELEGATES
- return Delete(
- From=de,
- Where=(
- de.DELEGATOR == Parameter("delegator")
- ).And(
- de.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _removeDelegateGroupQuery(cls):
- ds = schema.DELEGATE_GROUPS
- return Delete(
- From=ds,
- Where=(
- ds.DELEGATOR == Parameter("delegator")
- ).And(
- ds.GROUP_ID == Parameter("groupID")
- ).And(
- ds.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _removeDelegateGroupsQuery(cls):
- ds = schema.DELEGATE_GROUPS
- return Delete(
- From=ds,
- Where=(
- ds.DELEGATOR == Parameter("delegator")
- ).And(
- ds.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _selectDelegatesQuery(cls):
- de = schema.DELEGATES
- return Select(
- [de.DELEGATE],
- From=de,
- Where=(
- de.DELEGATOR == Parameter("delegator")
- ).And(
- de.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _selectDelegatorsToGroupQuery(cls):
- dg = schema.DELEGATE_GROUPS
- return Select(
- [dg.DELEGATOR],
- From=dg,
- Where=(
- dg.GROUP_ID == Parameter("delegateGroup")
- ).And(
- dg.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _selectDelegateGroupsQuery(cls):
- ds = schema.DELEGATE_GROUPS
- gr = schema.GROUPS
-
- return Select(
- [gr.GROUP_UID],
- From=gr,
- Where=(
- gr.GROUP_ID.In(
- Select(
- [ds.GROUP_ID],
- From=ds,
- Where=(
- ds.DELEGATOR == Parameter("delegator")
- ).And(
- ds.READ_WRITE == Parameter("readWrite")
- )
- )
- )
- )
- )
-
-
- @classproperty
- def _selectDirectDelegatorsQuery(cls):
- de = schema.DELEGATES
- return Select(
- [de.DELEGATOR],
- From=de,
- Where=(
- de.DELEGATE == Parameter("delegate")
- ).And(
- de.READ_WRITE == Parameter("readWrite")
- )
- )
-
-
- @classproperty
- def _selectIndirectDelegatorsQuery(cls):
- dg = schema.DELEGATE_GROUPS
- gm = schema.GROUP_MEMBERSHIP
-
- return Select(
- [dg.DELEGATOR],
- From=dg,
- Where=(
- dg.GROUP_ID.In(
- Select(
- [gm.GROUP_ID],
- From=gm,
- Where=(gm.MEMBER_UID == Parameter("delegate"))
- )
- ).And(
- dg.READ_WRITE == Parameter("readWrite")
- )
- )
- )
-
-
- @classproperty
- def _selectIndirectDelegatesQuery(cls):
- dg = schema.DELEGATE_GROUPS
- gm = schema.GROUP_MEMBERSHIP
-
- return Select(
- [gm.MEMBER_UID],
- From=gm,
- Where=(
- gm.GROUP_ID.In(
- Select(
- [dg.GROUP_ID],
- From=dg,
- Where=(dg.DELEGATOR == Parameter("delegator")).And(
- dg.READ_WRITE == Parameter("readWrite"))
- )
- )
- )
- )
-
-
- @classproperty
- def _selectExternalDelegateGroupsQuery(cls):
- edg = schema.EXTERNAL_DELEGATE_GROUPS
- return Select(
- [edg.DELEGATOR, edg.GROUP_UID_READ, edg.GROUP_UID_WRITE],
- From=edg
- )
-
-
- @classproperty
- def _removeExternalDelegateGroupsPairQuery(cls):
- edg = schema.EXTERNAL_DELEGATE_GROUPS
- return Delete(
- From=edg,
- Where=(
- edg.DELEGATOR == Parameter("delegator")
- )
- )
-
-
- @classproperty
- def _storeExternalDelegateGroupsPairQuery(cls):
- edg = schema.EXTERNAL_DELEGATE_GROUPS
- return Insert(
- {
- edg.DELEGATOR: Parameter("delegator"),
- edg.GROUP_UID_READ: Parameter("readDelegate"),
- edg.GROUP_UID_WRITE: Parameter("writeDelegate"),
- }
- )
-
-
- @classproperty
- def _removeExternalDelegateGroupsQuery(cls):
- ds = schema.DELEGATE_GROUPS
- return Delete(
- From=ds,
- Where=(
- ds.DELEGATOR == Parameter("delegator")
- ).And(
- ds.IS_EXTERNAL == 1
- )
- )
-
-
- @inlineCallbacks
- def addDelegate(self, delegator, delegate, readWrite):
- """
- Adds a row to the DELEGATES table. The delegate should not be a
- group. To delegate to a group, call addDelegateGroup() instead.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param delegate: the UID of the delegate
- @type delegate: C{unicode}
- @param readWrite: grant read and write access if True, otherwise
- read-only access
- @type readWrite: C{boolean}
- """
-
- def _addDelegate(subtxn):
- return self._addDelegateQuery.on(
- subtxn,
- delegator=delegator.encode("utf-8"),
- delegate=delegate.encode("utf-8"),
- readWrite=1 if readWrite else 0
- )
-
- try:
- yield self.subtransaction(_addDelegate, retries=0, failureOK=True)
- except AllRetriesFailed:
- pass
-
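# Illustrative sketch (assumed names) of the "insert, tolerate races" pattern
# used by addDelegate()/addDelegateGroup(): run the INSERT inside a
# subtransaction with no retries and swallow AllRetriesFailed, so a concurrent
# transaction inserting the identical row is not treated as an error. "txn"
# and "insertQuery" are placeholders for a store transaction and a DAL Insert;
# AllRetriesFailed is the exception referenced above (assumed already imported
# at module scope).
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def insertIgnoringDuplicates(txn, insertQuery, **kwargs):
    def _doInsert(subtxn):
        return insertQuery.on(subtxn, **kwargs)
    try:
        yield txn.subtransaction(_doInsert, retries=0, failureOK=True)
    except AllRetriesFailed:
        # Row already present (created by another transaction): fine.
        pass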
-
- @inlineCallbacks
- def addDelegateGroup(self, delegator, delegateGroupID, readWrite,
- isExternal=False):
- """
- Adds a row to the DELEGATE_GROUPS table. The delegate should be a
- group. To delegate to a person, call addDelegate() instead.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param delegateGroupID: the GROUP_ID of the delegate group
- @type delegateGroupID: C{int}
- @param readWrite: grant read and write access if True, otherwise
- read-only access
- @type readWrite: C{boolean}
- """
-
- def _addDelegateGroup(subtxn):
- return self._addDelegateGroupQuery.on(
- subtxn,
- delegator=delegator.encode("utf-8"),
- groupID=delegateGroupID,
- readWrite=1 if readWrite else 0,
- isExternal=1 if isExternal else 0
- )
-
- try:
- yield self.subtransaction(_addDelegateGroup, retries=0, failureOK=True)
- except AllRetriesFailed:
- pass
-
-
- def removeDelegate(self, delegator, delegate, readWrite):
- """
- Removes a row from the DELEGATES table. The delegate should not be a
- group. To remove a delegate group, call removeDelegateGroup() instead.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param delegate: the UID of the delegate
- @type delegate: C{unicode}
- @param readWrite: remove read and write access if True, otherwise
- read-only access
- @type readWrite: C{boolean}
- """
- return self._removeDelegateQuery.on(
- self,
- delegator=delegator.encode("utf-8"),
- delegate=delegate.encode("utf-8"),
- readWrite=1 if readWrite else 0
- )
-
-
- def removeDelegates(self, delegator, readWrite):
- """
- Removes all rows for this delegator/readWrite combination from the
- DELEGATES table.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param readWrite: remove read and write access if True, otherwise
- read-only access
- @type readWrite: C{boolean}
- """
- return self._removeDelegatesQuery.on(
- self,
- delegator=delegator.encode("utf-8"),
- readWrite=1 if readWrite else 0
- )
-
-
- def removeDelegateGroup(self, delegator, delegateGroupID, readWrite):
- """
- Removes a row from the DELEGATE_GROUPS table. The delegate should be a
- group. To remove a delegate person, call removeDelegate() instead.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param delegateGroupID: the GROUP_ID of the delegate group
- @type delegateGroupID: C{int}
- @param readWrite: remove read and write access if True, otherwise
- read-only access
- @type readWrite: C{boolean}
- """
- return self._removeDelegateGroupQuery.on(
- self,
- delegator=delegator.encode("utf-8"),
- groupID=delegateGroupID,
- readWrite=1 if readWrite else 0
- )
-
-
- def removeDelegateGroups(self, delegator, readWrite):
- """
- Removes all rows for this delegator/readWrite combination from the
- DELEGATE_GROUPS table.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param readWrite: remove read and write access if True, otherwise
- read-only access
- @type readWrite: C{boolean}
- """
- return self._removeDelegateGroupsQuery.on(
- self,
- delegator=delegator.encode("utf-8"),
- readWrite=1 if readWrite else 0
- )
-
-
- @inlineCallbacks
- def delegates(self, delegator, readWrite, expanded=False):
- """
- Returns the UIDs of all delegates for the given delegator. If
- expanded is False, only the direct delegates (users and groups)
- are returned. If expanded is True, the expanded membership is
- returned, not including the groups themselves.
-
- @param delegator: the UID of the delegator
- @type delegator: C{unicode}
- @param readWrite: the access-type to check for; read and write
- access if True, otherwise read-only access
- @type readWrite: C{boolean}
- @returns: the UIDs of the delegates (for the specified access
- type)
- @rtype: a Deferred resulting in a set
- """
- delegates = set()
- delegatorU = delegator.encode("utf-8")
-
- # First get the direct delegates
- results = (
- yield self._selectDelegatesQuery.on(
- self,
- delegator=delegatorU,
- readWrite=1 if readWrite else 0
- )
- )
- delegates.update([row[0].decode("utf-8") for row in results])
-
- if expanded:
- # Get those who are in groups which have been delegated to
- results = (
- yield self._selectIndirectDelegatesQuery.on(
- self,
- delegator=delegatorU,
- readWrite=1 if readWrite else 0
- )
- )
- # Skip the delegator if they are in one of the groups
- delegates.update([row[0].decode("utf-8") for row in results if row[0] != delegatorU])
-
- else:
- # Get the directly-delegated-to groups
- results = (
- yield self._selectDelegateGroupsQuery.on(
- self,
- delegator=delegatorU,
- readWrite=1 if readWrite else 0
- )
- )
- delegates.update([row[0].decode("utf-8") for row in results])
-
- returnValue(delegates)
-
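# Illustrative sketch (assumed names): the difference between the direct and
# expanded views of delegation. "txn" is assumed to be an open store
# transaction providing delegates() as defined above.
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def delegateSummary(txn, delegatorUID):
    # Direct delegates: individual users plus the UIDs of delegated-to groups.
    direct = yield txn.delegates(delegatorUID, True, expanded=False)
    # Expanded delegates: group membership flattened, groups themselves omitted.
    expanded = yield txn.delegates(delegatorUID, True, expanded=True)
    returnValue((direct, expanded))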
-
- @inlineCallbacks
- def delegators(self, delegate, readWrite):
- """
- Returns the UIDs of all delegators which have granted access to
- the given delegate, either directly or indirectly via groups.
-
- @param delegate: the UID of the delegate
- @type delegate: C{unicode}
- @param readWrite: the access-type to check for; read and write
- access if True, otherwise read-only access
- @type readWrite: C{boolean}
- @returns: the UIDs of the delegators (for the specified access
- type)
- @rtype: a Deferred resulting in a set
- """
- delegators = set()
- delegateU = delegate.encode("utf-8")
-
- # First get the direct delegators
- results = (
- yield self._selectDirectDelegatorsQuery.on(
- self,
- delegate=delegateU,
- readWrite=1 if readWrite else 0
- )
- )
- delegators.update([row[0].decode("utf-8") for row in results])
-
- # Finally get those who have delegated to groups the delegate
- # is a member of
- results = (
- yield self._selectIndirectDelegatorsQuery.on(
- self,
- delegate=delegateU,
- readWrite=1 if readWrite else 0
- )
- )
- # Skip the delegate if they show up as one of the delegators (via a group they are in)
- delegators.update([row[0].decode("utf-8") for row in results if row[0] != delegateU])
-
- returnValue(delegators)
-
-
- @inlineCallbacks
- def delegatorsToGroup(self, delegateGroupID, readWrite):
- """
- Return the UIDs of those who have delegated to the given group with the
- given access level.
-
- @param delegateGroupID: the group ID of the delegate group
- @type delegateGroupID: C{int}
- @param readWrite: the access-type to check for; read and write
- access if True, otherwise read-only access
- @type readWrite: C{boolean}
- @returns: the UIDs of the delegators (for the specified access
- type)
- @rtype: a Deferred resulting in a set
-
- """
- results = (
- yield self._selectDelegatorsToGroupQuery.on(
- self,
- delegateGroup=delegateGroupID,
- readWrite=1 if readWrite else 0
- )
- )
- delegators = set([row[0].decode("utf-8") for row in results])
- returnValue(delegators)
-
-
- @inlineCallbacks
- def allGroupDelegates(self):
- """
- Return the UIDs of all groups which have been delegated to. Useful
- for obtaining the set of groups which need to be synchronized from
- the directory.
-
- @returns: the UIDs of all delegated-to groups
- @rtype: a Deferred resulting in a set
- """
- gr = schema.GROUPS
- dg = schema.DELEGATE_GROUPS
-
- results = (yield Select(
- [gr.GROUP_UID],
- From=gr,
- Where=(gr.GROUP_ID.In(Select([dg.GROUP_ID], From=dg, Where=None)))
- ).on(self))
- delegates = set()
- for row in results:
- delegates.add(row[0].decode("utf-8"))
-
- returnValue(delegates)
-
-
- @inlineCallbacks
- def externalDelegates(self):
- """
- Returns a dictionary mapping delegator UIDs to (read-group, write-group)
- tuples, including only those assignments that originated from the
- directory.
-
- @returns: dictionary mapping delegator uid to (readDelegateUID,
- writeDelegateUID) tuples
- @rtype: a Deferred resulting in a dictionary
- """
- delegates = {}
-
- # Get the externally managed delegates (which are all groups)
- results = (yield self._selectExternalDelegateGroupsQuery.on(self))
- for delegator, readDelegateUID, writeDelegateUID in results:
- delegates[delegator.encode("utf-8")] = (
- readDelegateUID.encode("utf-8") if readDelegateUID else None,
- writeDelegateUID.encode("utf-8") if writeDelegateUID else None
- )
-
- returnValue(delegates)
-
-
- @inlineCallbacks
- def assignExternalDelegates(
- self, delegator, readDelegateGroupID, writeDelegateGroupID,
- readDelegateUID, writeDelegateUID
- ):
- """
- Update the external delegate group table so we can quickly identify
- diffs next time, and update the delegate group table itself.
-
- @param delegator: the delegator
- @type delegator: C{UUID}
- """
-
- # Delete existing external assignments for the delegator
- yield self._removeExternalDelegateGroupsQuery.on(
- self,
- delegator=str(delegator)
- )
-
- # Remove from the external comparison table
- yield self._removeExternalDelegateGroupsPairQuery.on(
- self,
- delegator=str(delegator)
- )
-
- # Store new assignments in the external comparison table
- if readDelegateUID or writeDelegateUID:
- readDelegateForDB = (
- readDelegateUID.encode("utf-8") if readDelegateUID else ""
- )
- writeDelegateForDB = (
- writeDelegateUID.encode("utf-8") if writeDelegateUID else ""
- )
- yield self._storeExternalDelegateGroupsPairQuery.on(
- self,
- delegator=str(delegator),
- readDelegate=readDelegateForDB,
- writeDelegate=writeDelegateForDB
- )
-
- # Apply new assignments
- if readDelegateGroupID is not None:
- yield self.addDelegateGroup(
- delegator, readDelegateGroupID, False, isExternal=True
- )
- if writeDelegateGroupID is not None:
- yield self.addDelegateGroup(
- delegator, writeDelegateGroupID, True, isExternal=True
- )
-
-
- # End of Delegates
-
-
</del><span class="cx"> def preCommit(self, operation):
</span><span class="cx"> """
</span><span class="cx"> Run things before C{commit}. (Note: only provided by SQL
</span><span class="lines">@@ -2809,227 +1544,58 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><del>-class _EmptyCacher(object):
-
- def set(self, key, value):
- return succeed(True)
-
-
- def get(self, key, withIdentifier=False):
- return succeed(None)
-
-
- def delete(self, key):
- return succeed(True)
-
-
-
-class SharingHomeMixIn(object):
- """
- Common class for CommonHome to implement sharing operations
- """
-
- @inlineCallbacks
- def acceptShare(self, shareUID, summary=None):
- """
- This share is being accepted.
- """
-
- shareeView = yield self.anyObjectWithShareUID(shareUID)
- if shareeView is not None:
- yield shareeView.acceptShare(summary)
-
- returnValue(shareeView)
-
-
- @inlineCallbacks
- def declineShare(self, shareUID):
- """
- This share is being declined.
- """
-
- shareeView = yield self.anyObjectWithShareUID(shareUID)
- if shareeView is not None:
- yield shareeView.declineShare()
-
- returnValue(shareeView is not None)
-
-
- #
- # External (cross-pod) sharing - entry point is the sharee's home collection.
- #
- @inlineCallbacks
- def processExternalInvite(
- self, ownerUID, ownerRID, ownerName, shareUID, bindMode, summary,
- copy_invite_properties, supported_components=None
- ):
- """
- External invite received.
- """
-
- # Get the owner home - create external one if not present
- ownerHome = yield self._txn.homeWithUID(
- self._homeType, ownerUID, create=True
- )
- if ownerHome is None or not ownerHome.external():
- raise ExternalShareFailed("Invalid owner UID: {}".format(ownerUID))
-
- # Try to find owner calendar via its external id
- ownerView = yield ownerHome.childWithExternalID(ownerRID)
- if ownerView is None:
- try:
- ownerView = yield ownerHome.createChildWithName(
- ownerName, externalID=ownerRID
- )
- except HomeChildNameAlreadyExistsError:
- # This is odd - it means we possibly have a left over sharer
- # collection which the sharer likely removed and re-created
- # with the same name but now it has a different externalID and
- # is not found by the initial query. What we do is check to see
- # whether any shares still reference the old ID - if they do we
- # are hosed. If not, we can remove the old item and create a new one.
- oldOwnerView = yield ownerHome.childWithName(ownerName)
- invites = yield oldOwnerView.sharingInvites()
- if len(invites) != 0:
- log.error(
- "External invite collection name is present with a "
- "different externalID and still has shares"
- )
- raise
- log.error(
- "External invite collection name is present with a "
- "different externalID - trying to fix"
- )
- yield ownerHome.removeExternalChild(oldOwnerView)
- ownerView = yield ownerHome.createChildWithName(
- ownerName, externalID=ownerRID
- )
-
- if (
- supported_components is not None and
- hasattr(ownerView, "setSupportedComponents")
- ):
- yield ownerView.setSupportedComponents(supported_components)
-
- # Now carry out the share operation
- if bindMode == _BIND_MODE_DIRECT:
- shareeView = yield ownerView.directShareWithUser(
- self.uid(), shareName=shareUID
- )
- else:
- shareeView = yield ownerView.inviteUIDToShare(
- self.uid(), bindMode, summary, shareName=shareUID
- )
-
- shareeView.setInviteCopyProperties(copy_invite_properties)
-
-
- @inlineCallbacks
- def processExternalUninvite(self, ownerUID, ownerRID, shareUID):
- """
- External uninvite received.
- """
-
- # Get the owner home
- ownerHome = yield self._txn.homeWithUID(self._homeType, ownerUID)
- if ownerHome is None or not ownerHome.external():
- raise ExternalShareFailed("Invalid owner UID: {}".format(ownerUID))
-
- # Try to find owner calendar via its external id
- ownerView = yield ownerHome.childWithExternalID(ownerRID)
- if ownerView is None:
- raise ExternalShareFailed("Invalid share ID: {}".format(shareUID))
-
- # Now carry out the share operation
- yield ownerView.uninviteUIDFromShare(self.uid())
-
- # See if there are any references to the external share. If not,
- # remove it
- invites = yield ownerView.sharingInvites()
- if len(invites) == 0:
- yield ownerHome.removeExternalChild(ownerView)
-
-
- @inlineCallbacks
- def processExternalReply(
- self, ownerUID, shareeUID, shareUID, bindStatus, summary=None
- ):
- """
- External share reply received.
- """
-
- # Make sure the shareeUID and shareUID match
-
- # Get the owner home - create external one if not present
- shareeHome = yield self._txn.homeWithUID(self._homeType, shareeUID)
- if shareeHome is None or not shareeHome.external():
- raise ExternalShareFailed(
- "Invalid sharee UID: {}".format(shareeUID)
- )
-
- # Try to find owner calendar via its external id
- shareeView = yield shareeHome.anyObjectWithShareUID(shareUID)
- if shareeView is None:
- raise ExternalShareFailed("Invalid share UID: {}".format(shareUID))
-
- # Now carry out the share operation
- if bindStatus == _BIND_STATUS_ACCEPTED:
- yield shareeHome.acceptShare(shareUID, summary)
- elif bindStatus == _BIND_STATUS_DECLINED:
- if shareeView.direct():
- yield shareeView.deleteShare()
- else:
- yield shareeHome.declineShare(shareUID)
-
-
-
</del><span class="cx"> class CommonHome(SharingHomeMixIn):
</span><span class="cx"> log = Logger()
</span><span class="cx">
</span><span class="cx"> # All these need to be initialized by derived classes for each store type
</span><span class="cx"> _homeType = None
</span><del>- _homeTable = None
- _homeMetaDataTable = None
</del><ins>+ _homeSchema = None
+ _homeMetaDataSchema = None
+
</ins><span class="cx"> _externalClass = None
</span><span class="cx"> _childClass = None
</span><del>- _childTable = None
</del><ins>+
+ _bindSchema = None
+ _revisionsSchema = None
+ _objectSchema = None
+
</ins><span class="cx"> _notifierPrefix = None
</span><span class="cx">
</span><span class="cx"> _dataVersionKey = None
</span><span class="cx"> _dataVersionValue = None
</span><span class="cx">
</span><del>- _cacher = None # Initialize in derived classes
-
</del><span class="cx"> @classmethod
</span><del>- @inlineCallbacks
- def makeClass(cls, transaction, ownerUID, no_cache=False, authzUID=None):
</del><ins>+ def makeClass(cls, transaction, homeData, authzUID=None):
</ins><span class="cx"> """
</span><span class="cx"> Build the actual home class taking into account the possibility that we might need to
</span><span class="cx"> switch in the external version of the class.
</span><span class="cx">
</span><span class="cx"> @param transaction: transaction
</span><span class="cx"> @type transaction: L{CommonStoreTransaction}
</span><del>- @param ownerUID: owner UID of home to load
- @type ownerUID: C{str}
- @param no_cache: should cached query be used
- @type no_cache: C{bool}
</del><ins>+ @param homeData: home table column data
+ @type homeData: C{list}
</ins><span class="cx"> """
</span><del>- home = cls(transaction, ownerUID, authzUID=authzUID)
- actualHome = yield home.initFromStore(no_cache)
- returnValue(actualHome)
</del><span class="cx">
</span><ins>+ status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
+ if status == _HOME_STATUS_EXTERNAL:
+ home = cls._externalClass(transaction, homeData)
+ else:
+ home = cls(transaction, homeData, authzUID=authzUID)
+ return home.initFromStore()
</ins><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID, authzUID=None):
</del><ins>+
+ def __init__(self, transaction, homeData, authzUID=None):
</ins><span class="cx"> self._txn = transaction
</span><del>- self._ownerUID = ownerUID
</del><ins>+
+ for attr, value in zip(self.homeAttributes(), homeData):
+ setattr(self, attr, value)
+
</ins><span class="cx"> self._authzUID = authzUID
</span><span class="cx"> if self._authzUID is None:
</span><span class="cx"> if self._txn._authz_uid is not None:
</span><span class="cx"> self._authzUID = self._txn._authz_uid
</span><span class="cx"> else:
</span><span class="cx"> self._authzUID = self._ownerUID
</span><del>- self._resourceID = None
- self._status = _HOME_STATUS_NORMAL
</del><span class="cx"> self._dataVersion = None
</span><span class="cx"> self._childrenLoaded = False
</span><span class="cx"> self._children = {}
</span><span class="lines">@@ -3038,15 +1604,13 @@
</span><span class="cx"> self._created = None
</span><span class="cx"> self._modified = None
</span><span class="cx"> self._syncTokenRevision = None
</span><del>- if transaction._disableCache:
- self._cacher = _EmptyCacher()
</del><span class="cx">
</span><span class="cx"> # This is used to track whether the originating request is from the store associated
</span><span class="cx"> # by the transaction, or from a remote store. We need to be able to distinguish store
</span><span class="cx"> # objects that are locally hosted (_HOME_STATUS_NORMAL) or remotely hosted
</span><span class="cx"> # (_HOME_STATUS_EXTERNAL). For the later we need to know whether the object is being
</span><span class="cx"> # accessed from the local store (in which case requests for child objects etc will be
</span><del>- # directed at a remote store) or whether it is being accessed as the tresult of a remote
</del><ins>+ # directed at a remote store) or whether it is being accessed as the result of a remote
</ins><span class="cx"> # request (in which case requests for child objects etc will be directed at the local store).
</span><span class="cx"> self._internalRequest = True
</span><span class="cx">
</span><span class="lines">@@ -3071,14 +1635,16 @@
</span><span class="cx"> return Select(
</span><span class="cx"> cls.homeColumns(),
</span><span class="cx"> From=home,
</span><del>- Where=home.OWNER_UID == Parameter("ownerUID")
</del><ins>+ Where=(home.OWNER_UID == Parameter("ownerUID")).And(
+ home.STATUS == Parameter("status")
+ )
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classproperty
</span><span class="cx"> def _ownerFromResourceID(cls):
</span><span class="cx"> home = cls._homeSchema
</span><del>- return Select([home.OWNER_UID],
</del><ins>+ return Select([home.OWNER_UID, home.STATUS],
</ins><span class="cx"> From=home,
</span><span class="cx"> Where=home.RESOURCE_ID == Parameter("resourceID"))
</span><span class="cx">
</span><span class="lines">@@ -3154,41 +1720,22 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def initFromStore(self, no_cache=False):
</del><ins>+ def initFromStore(self):
</ins><span class="cx"> """
</span><span class="cx"> Initialize this object from the store. We read in and cache all the
</span><span class="cx"> extra meta-data from the DB to avoid having to do DB queries for those
</span><span class="cx"> individually later.
</span><span class="cx"> """
</span><del>- result = yield self._cacher.get(self._ownerUID)
- if result is None:
- result = yield self._homeColumnsFromOwnerQuery.on(self._txn, ownerUID=self._ownerUID)
- if result:
- result = result[0]
- if not no_cache:
- yield self._cacher.set(self._ownerUID, result)
</del><span class="cx">
</span><del>- if result:
- for attr, value in zip(self.homeAttributes(), result):
- setattr(self, attr, value)
</del><ins>+ yield self.initMetaDataFromStore()
+ yield self._loadPropertyStore()
</ins><span class="cx">
</span><del>- # STOP! If the status is external we need to convert this object to a CommonHomeExternal class which will
- # have the right behavior for non-hosted external users.
- if self._status == _HOME_STATUS_EXTERNAL:
- actualHome = self._externalClass(self._txn, self._ownerUID, self._resourceID)
- else:
- actualHome = self
- yield actualHome.initMetaDataFromStore()
- yield actualHome._loadPropertyStore()
</del><ins>+ for factory_type, factory in self._txn._notifierFactories.items():
+ self.addNotifier(factory_type, factory.newNotifier(self))
</ins><span class="cx">
</span><del>- for factory_type, factory in self._txn._notifierFactories.items():
- actualHome.addNotifier(factory_type, factory.newNotifier(actualHome))
</del><ins>+ returnValue(self)
</ins><span class="cx">
</span><del>- returnValue(actualHome)
- else:
- returnValue(None)
</del><span class="cx">
</span><del>-
</del><span class="cx"> @inlineCallbacks
</span><span class="cx"> def initMetaDataFromStore(self):
</span><span class="cx"> """
</span><span class="lines">@@ -3213,6 +1760,24 @@
</span><span class="cx"> setattr(self, attr, value)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def serialize(self):
+ """
+ Create a dictionary mapping metadata attribute names to their values so this object can be sent over a cross-pod call
+ and reconstituted at the other end. Note that the other end may have a different schema so
+ the attributes may not match exactly and will need to be processed accordingly.
+ """
+ return dict([(attr[1:], getattr(self, attr, None)) for attr in self.metadataAttributes()])
+
+
+ def deserialize(self, mapping):
+ """
+ Given a mapping generated by L{serialize}, convert the values to attributes on this object.
+ """
+
+ for attr in self.metadataAttributes():
+ setattr(self, attr, mapping.get(attr[1:]))
+
+
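# Illustrative sketch (assumed names): serialize() strips the leading
# underscore from each metadata attribute name, and deserialize() restores the
# values onto another instance, e.g. for a cross-pod call. "home" and
# "remoteHome" are assumed CommonHome instances with compatible schemas.
def copyMetadata(home, remoteHome):
    mapping = home.serialize()          # e.g. {"created": ..., "modified": ...}
    remoteHome.deserialize(mapping)     # sets remoteHome._created, ._modified, ...
    return mapping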
</ins><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def listHomes(cls, txn):
</span><span class="lines">@@ -3230,16 +1795,93 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><ins>+ def homeWithUID(cls, txn, uid, status=None, create=False, authzUID=None):
+ return cls.homeWith(txn, None, uid, status, create=create, authzUID=authzUID)
+
+
+ @classmethod
+ def homeWithResourceID(cls, txn, rid):
+ return cls.homeWith(txn, rid, None)
+
+
+ @classmethod
</ins><span class="cx"> @inlineCallbacks
</span><del>- def homeWithUID(cls, txn, uid, create=False, authzUID=None):
</del><ins>+ def homeWith(cls, txn, rid, uid, status=None, create=False, authzUID=None):
</ins><span class="cx"> """
</span><del>- @param uid: I'm going to assume uid is utf-8 encoded bytes
</del><ins>+ Lookup or create a home based in either its resource id or uid. If a status is given,
+ return only the one matching that status. If status is L{None} we lookup any regular
+ status type (normal, external or purging). When creating with status L{None} we create
+ one with a status matching the current directory record thisServer() value. The only
+ other status that can be directly created is migrating.
</ins><span class="cx"> """
</span><del>- homeObject = yield cls.makeClass(txn, uid, authzUID=authzUID)
- if homeObject is not None:
</del><ins>+
+ # Setup the SQL query and query cacher keys
+ queryCacher = txn._queryCacher
+ cacheKeys = []
+ if rid is not None:
+ query = cls._homeSchema.RESOURCE_ID == rid
+ if queryCacher:
+ cacheKeys.append(queryCacher.keyForHomeWithID(cls._homeType, rid, status))
+ elif uid is not None:
+ query = cls._homeSchema.OWNER_UID == uid
+ if status is not None:
+ query = query.And(cls._homeSchema.STATUS == status)
+ if queryCacher:
+ cacheKeys.append(queryCacher.keyForHomeWithUID(cls._homeType, uid, status))
+ else:
+ statusSet = (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, _HOME_STATUS_PURGING)
+ if txn._allowDisabled:
+ statusSet += (_HOME_STATUS_DISABLED,)
+ query = query.And(cls._homeSchema.STATUS.In(statusSet))
+ if queryCacher:
+ for item in statusSet:
+ cacheKeys.append(queryCacher.keyForHomeWithUID(cls._homeType, uid, item))
+ else:
+ raise AssertionError("One of rid or uid must be set")
+
+ # Try to fetch a result from the query cache first
+ for cacheKey in cacheKeys:
+ result = (yield queryCacher.get(cacheKey))
+ if result is not None:
+ break
+ else:
+ result = None
+
+ # If nothing in the cache, do the SQL query and cache the result
+ if result is None:
+ results = yield Select(
+ cls.homeColumns(),
+ From=cls._homeSchema,
+ Where=query,
+ ).on(txn)
+
+ if len(results) > 1:
+ # Pick the best one in order: normal, disabled and external
+ byStatus = dict([(result[cls.homeColumns().index(cls._homeSchema.STATUS)], result) for result in results])
+ result = byStatus.get(_HOME_STATUS_NORMAL)
+ if result is None:
+ result = byStatus.get(_HOME_STATUS_DISABLED)
+ if result is None:
+ result = byStatus.get(_HOME_STATUS_EXTERNAL)
+ elif results:
+ result = results[0]
+ else:
+ result = None
+
+ if result and queryCacher:
+ if rid is not None:
+ cacheKey = cacheKeys[0]
+ elif uid is not None:
+ cacheKey = queryCacher.keyForHomeWithUID(cls._homeType, uid, result[cls.homeColumns().index(cls._homeSchema.STATUS)])
+ yield queryCacher.set(cacheKey, result)
+
+ if result:
+ # Return object that already exists in the store
+ homeObject = yield cls.makeClass(txn, result, authzUID=authzUID)
</ins><span class="cx"> returnValue(homeObject)
</span><span class="cx"> else:
</span><del>- if not create:
</del><ins>+ # Can only create when uid is specified
+ if not create or uid is None:
</ins><span class="cx"> returnValue(None)
</span><span class="cx">
</span><span class="cx"> # Determine if the user is local or external
</span><span class="lines">@@ -3247,8 +1889,18 @@
</span><span class="cx"> if record is None:
</span><span class="cx"> raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
</span><span class="cx">
</span><del>- state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
</del><ins>+ if status is None:
+ createStatus = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
+ elif status == _HOME_STATUS_MIGRATING:
+ if record.thisServer():
+ raise RecordNotAllowedError("Cannot migrate a user data for a user already hosted on this server")
+ createStatus = status
+ elif status in (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL,):
+ createStatus = status
+ else:
+ raise RecordNotAllowedError("Cannot create home with status {}: {}".format(status, uid))
</ins><span class="cx">
</span><ins>+
</ins><span class="cx"> # Use savepoint so we can do a partial rollback if there is a race condition
</span><span class="cx"> # where this row has already been inserted
</span><span class="cx"> savepoint = SavepointAction("homeWithUID")
</span><span class="lines">@@ -3260,7 +1912,7 @@
</span><span class="cx"> resourceid = (yield Insert(
</span><span class="cx"> {
</span><span class="cx"> cls._homeSchema.OWNER_UID: uid,
</span><del>- cls._homeSchema.STATUS: state,
</del><ins>+ cls._homeSchema.STATUS: createStatus,
</ins><span class="cx"> cls._homeSchema.DATAVERSION: cls._dataVersionValue,
</span><span class="cx"> },
</span><span class="cx"> Return=cls._homeSchema.RESOURCE_ID
</span><span class="lines">@@ -3270,8 +1922,13 @@
</span><span class="cx"> yield savepoint.rollback(txn)
</span><span class="cx">
</span><span class="cx"> # Retry the query - row may exist now, if not re-raise
</span><del>- homeObject = yield cls.makeClass(txn, uid, authzUID=authzUID)
- if homeObject:
</del><ins>+ results = yield Select(
+ cls.homeColumns(),
+ From=cls._homeSchema,
+ Where=query,
+ ).on(txn)
+ if results:
+ homeObject = yield cls.makeClass(txn, results[0], authzUID=authzUID)
</ins><span class="cx"> returnValue(homeObject)
</span><span class="cx"> else:
</span><span class="cx"> raise
</span><span class="lines">@@ -3279,27 +1936,27 @@
</span><span class="cx"> yield savepoint.release(txn)
</span><span class="cx">
</span><span class="cx"> # Note that we must not cache the owner_uid->resource_id
</span><del>- # mapping in _cacher when creating as we don't want that to appear
</del><ins>+ # mapping in the query cacher when creating as we don't want that to appear
</ins><span class="cx"> # until AFTER the commit
</span><del>- home = yield cls.makeClass(txn, uid, no_cache=True, authzUID=authzUID)
- yield home.createdHome()
- returnValue(home)
</del><ins>+ results = yield Select(
+ cls.homeColumns(),
+ From=cls._homeSchema,
+ Where=cls._homeSchema.RESOURCE_ID == resourceid,
+ ).on(txn)
+ homeObject = yield cls.makeClass(txn, results[0], authzUID=authzUID)
+ if homeObject.normal():
+ yield homeObject.createdHome()
+ returnValue(homeObject)
</ins><span class="cx">
</span><span class="cx">
</span><del>- @classmethod
- @inlineCallbacks
- def homeUIDWithResourceID(cls, txn, rid):
- rows = (yield cls._ownerFromResourceID.on(txn, resourceID=rid))
- if rows:
- returnValue(rows[0][0])
- else:
- returnValue(None)
-
-
</del><span class="cx"> def __repr__(self):
</span><span class="cx"> return "<%s: %s, %s>" % (self.__class__.__name__, self._resourceID, self._ownerUID)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def cacheKey(self):
+ return "{}-{}".format(self._status, self._ownerUID)
+
+
</ins><span class="cx"> def id(self):
</span><span class="cx"> """
</span><span class="cx"> Retrieve the store identifier for this home.
</span><span class="lines">@@ -3328,6 +1985,19 @@
</span><span class="cx"> return self._authzUID
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def status(self):
+ return self._status
+
+
+ def normal(self):
+ """
+ Is this a normal (internal) home.
+
+ @return: a L{bool}.
+ """
+ return self._status == _HOME_STATUS_NORMAL
+
+
</ins><span class="cx"> def external(self):
</span><span class="cx"> """
</span><span class="cx"> Is this an external home.
</span><span class="lines">@@ -3357,6 +2027,15 @@
</span><span class="cx"> return self._status == _HOME_STATUS_PURGING
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def migrating(self):
+ """
+ Is this a home that is currently being migrated.
+
+ @return: a L{bool}.
+ """
+ return self._status == _HOME_STATUS_MIGRATING
+
+
</ins><span class="cx"> def purge(self):
</span><span class="cx"> """
</span><span class="cx"> Mark this home as being purged.
</span><span class="lines">@@ -3364,6 +2043,13 @@
</span><span class="cx"> return self.setStatus(_HOME_STATUS_PURGING)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def migrate(self):
+ """
+ Mark this home as being migrated.
+ """
+ return self.setStatus(_HOME_STATUS_MIGRATING)
+
+
</ins><span class="cx"> @inlineCallbacks
</span><span class="cx"> def setStatus(self, newStatus):
</span><span class="cx"> """
</span><span class="lines">@@ -3375,10 +2061,67 @@
</span><span class="cx"> {self._homeSchema.STATUS: newStatus},
</span><span class="cx"> Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
</span><span class="cx"> ).on(self._txn)
</span><ins>+ if self._txn._queryCacher:
+ yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithUID(
+ self._homeType,
+ self.uid(),
+ self._status,
+ ))
+ yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithID(
+ self._homeType,
+ self.id(),
+ self._status,
+ ))
</ins><span class="cx"> self._status = newStatus
</span><del>- yield self._cacher.delete(self._ownerUID)
</del><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def remove(self):
+
+ # Removing the home table entry does NOT remove the child class entry - it does remove
+ # the associated bind entry. So manually remove each child.
+ yield self.removeAllChildren()
+
+ r = self._childClass._revisionsSchema
+ yield Delete(
+ From=r,
+ Where=r.HOME_RESOURCE_ID == self._resourceID,
+ ).on(self._txn)
+
+ h = self._homeSchema
+ yield Delete(
+ From=h,
+ Where=h.RESOURCE_ID == self._resourceID,
+ ).on(self._txn)
+
+ yield self.properties()._removeResource()
+
+ if self._txn._queryCacher:
+ yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithUID(
+ self._homeType,
+ self.uid(),
+ self._status,
+ ))
+ yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithID(
+ self._homeType,
+ self.id(),
+ self._status,
+ ))
+
+
+ @inlineCallbacks
+ def removeAllChildren(self):
+ """
+ Remove each child.
+ """
+
+ children = yield self.loadChildren()
+ for child in children:
+ yield child.remove()
+ self._children.pop(child.name(), None)
+ self._children.pop(child.id(), None)
+
+
</ins><span class="cx"> def transaction(self):
</span><span class="cx"> return self._txn
</span><span class="cx">
</span><span class="lines">@@ -3496,15 +2239,15 @@
</span><span class="cx"> return self._childClass.objectWithID(self, resourceID)
</span><span class="cx">
</span><span class="cx">
</span><del>- def childWithExternalID(self, externalID):
</del><ins>+ def childWithBindUID(self, bindUID):
</ins><span class="cx"> """
</span><del>- Retrieve the child with the given C{externalID} contained in this
</del><ins>+ Retrieve the child with the given C{bindUID} contained in this
</ins><span class="cx"> home.
</span><span class="cx">
</span><span class="cx"> @param name: a string.
</span><span class="cx"> @return: an L{ICalendar} or C{None} if no such child exists.
</span><span class="cx"> """
</span><del>- return self._childClass.objectWithExternalID(self, externalID)
</del><ins>+ return self._childClass.objectWithBindUID(self, bindUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> def allChildWithID(self, resourceID):
</span><span class="lines">@@ -3519,11 +2262,11 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def createChildWithName(self, name, externalID=None):
</del><ins>+ def createChildWithName(self, name, bindUID=None):
</ins><span class="cx"> if name.startswith("."):
</span><span class="cx"> raise HomeChildNameNotAllowedError(name)
</span><span class="cx">
</span><del>- child = yield self._childClass.create(self, name, externalID=externalID)
</del><ins>+ child = yield self._childClass.create(self, name, bindUID=bindUID)
</ins><span class="cx"> returnValue(child)
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -3598,13 +2341,18 @@
</span><span class="cx"> taken to invalid the cached value properly.
</span><span class="cx"> """
</span><span class="cx"> if self._syncTokenRevision is None:
</span><del>- self._syncTokenRevision = (yield self._syncTokenQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
- if self._syncTokenRevision is None:
- self._syncTokenRevision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
</del><ins>+ self._syncTokenRevision = yield self.syncTokenRevision()
</ins><span class="cx"> returnValue("%s_%s" % (self._resourceID, self._syncTokenRevision))
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def syncTokenRevision(self):
+ revision = (yield self._syncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
+ if revision is None:
+ revision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
+ returnValue(revision)
+
+
</ins><span class="cx"> @classproperty
</span><span class="cx"> def _changesQuery(cls):
</span><span class="cx"> bind = cls._bindSchema
</span><span class="lines">@@ -4091,1492 +2839,16 @@
</span><span class="cx"> Get the owner home for a shared child ID and the owner's name for that bound child.
</span><span class="cx"> Subclasses may override.
</span><span class="cx"> """
</span><del>- ownerHomeID, ownerName = (yield self._childClass._ownerHomeWithResourceID.on(self._txn, resourceID=resourceID))[0]
- ownerHome = yield self._txn.homeWithResourceID(self._homeType, ownerHomeID)
- returnValue((ownerHome, ownerName))
-
-
-
-class _SharedSyncLogic(object):
- """
- Logic for maintaining sync-token shared between notification collections and
- shared collections.
- """
-
- @classproperty
- def _childSyncTokenQuery(cls):
- """
- DAL query for retrieving the sync token of a L{CommonHomeChild} based on
- its resource ID.
- """
- rev = cls._revisionsSchema
- return Select([Max(rev.REVISION)], From=rev,
- Where=rev.RESOURCE_ID == Parameter("resourceID"))
-
-
- def revisionFromToken(self, token):
- if token is None:
- return 0
- elif isinstance(token, str) or isinstance(token, unicode):
- _ignore_uuid, revision = token.split("_", 1)
- return int(revision)
- else:
- return token
-
-
- @inlineCallbacks
- def syncToken(self):
- if self._syncTokenRevision is None:
- self._syncTokenRevision = (yield self._childSyncTokenQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
- if self._syncTokenRevision is None:
- self._syncTokenRevision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
- returnValue(("%s_%s" % (self._resourceID, self._syncTokenRevision,)))
-
-
- def objectResourcesSinceToken(self, token):
- raise NotImplementedError()
-
-
- @classmethod
- def _objectNamesSinceRevisionQuery(cls, deleted=True):
- """
- DAL query for (resource, deleted-flag)
- """
- rev = cls._revisionsSchema
- where = (rev.REVISION > Parameter("revision")).And(rev.RESOURCE_ID == Parameter("resourceID"))
- if not deleted:
- where = where.And(rev.DELETED == False)
- return Select(
- [rev.RESOURCE_NAME, rev.DELETED],
- From=rev,
- Where=where,
- )
-
-
- def resourceNamesSinceToken(self, token):
- """
- Return the changed and deleted resources since a particular sync-token. This simply extracts
- the revision from from the token then calls L{resourceNamesSinceRevision}.
-
- @param token: the sync-token to determine changes since
- @type token: C{str}
- """
-
- return self.resourceNamesSinceRevision(self.revisionFromToken(token))
-
-
- @inlineCallbacks
- def resourceNamesSinceRevision(self, revision):
- """
- Return the changed and deleted resources since a particular revision.
-
- @param revision: the revision to determine changes since
- @type revision: C{int}
- """
- changed = []
- deleted = []
- invalid = []
- if revision:
- minValidRevision = yield self._txn.calendarserverValue("MIN-VALID-REVISION")
- if revision < int(minValidRevision):
- raise SyncTokenValidException
-
- results = [
- (name if name else "", removed) for name, removed in (
- yield self._objectNamesSinceRevisionQuery().on(
- self._txn, revision=revision, resourceID=self._resourceID)
- )
- ]
- results.sort(key=lambda x: x[1])
-
- for name, wasdeleted in results:
- if name:
- if wasdeleted:
- deleted.append(name)
- else:
- changed.append(name)
- else:
- changed = yield self.listObjectResources()
-
- returnValue((changed, deleted, invalid))
-
-
- @classproperty
- def _removeDeletedRevision(cls):
- rev = cls._revisionsSchema
- return Delete(From=rev,
- Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
- rev.COLLECTION_NAME == Parameter("collectionName")))
-
-
- @classproperty
- def _addNewRevision(cls):
- rev = cls._revisionsSchema
- return Insert(
- {
- rev.HOME_RESOURCE_ID: Parameter("homeID"),
- rev.RESOURCE_ID: Parameter("resourceID"),
- rev.COLLECTION_NAME: Parameter("collectionName"),
- rev.RESOURCE_NAME: None,
- # Always starts false; may be updated to be a tombstone
- # later.
- rev.DELETED: False
- },
- Return=[rev.REVISION]
- )
-
-
- @inlineCallbacks
- def _initSyncToken(self):
- yield self._removeDeletedRevision.on(
- self._txn, homeID=self._home._resourceID, collectionName=self._name
- )
- self._syncTokenRevision = (yield (
- self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
- resourceID=self._resourceID,
- collectionName=self._name)))[0][0]
- self._txn.bumpRevisionForObject(self)
-
-
- @classproperty
- def _renameSyncTokenQuery(cls):
- """
- DAL query to change sync token for a rename (increment and adjust
- resource name).
- """
- rev = cls._revisionsSchema
- return Update(
- {
- rev.REVISION: schema.REVISION_SEQ,
- rev.COLLECTION_NAME: Parameter("name")
- },
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
- (rev.RESOURCE_NAME == None),
- Return=rev.REVISION
- )
-
-
- @inlineCallbacks
- def _renameSyncToken(self):
- rows = yield self._renameSyncTokenQuery.on(
- self._txn, name=self._name, resourceID=self._resourceID)
</del><ins>+ rows = yield self._childClass._ownerHomeWithResourceID.on(self._txn, resourceID=resourceID)
</ins><span class="cx"> if rows:
</span><del>- self._syncTokenRevision = rows[0][0]
- self._txn.bumpRevisionForObject(self)
</del><ins>+ ownerHomeID, ownerName = rows[0]
+ ownerHome = yield self._txn.homeWithResourceID(self._homeType, ownerHomeID)
+ returnValue((ownerHome, ownerName))
</ins><span class="cx"> else:
</span><del>- yield self._initSyncToken()
</del><ins>+ returnValue((None, None))
</ins><span class="cx">
</span><span class="cx">
</span><del>- @classproperty
- def _bumpSyncTokenQuery(cls):
- """
- DAL query to change collection sync token. Note this can impact multiple rows if the
- collection is shared.
- """
- rev = cls._revisionsSchema
- return Update(
- {rev.REVISION: schema.REVISION_SEQ, },
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
- (rev.RESOURCE_NAME == None)
- )
</del><span class="cx">
</span><del>-
- @inlineCallbacks
- def _bumpSyncToken(self):
-
- if not self._txn.isRevisionBumpedAlready(self):
- self._txn.bumpRevisionForObject(self)
- yield self._bumpSyncTokenQuery.on(
- self._txn,
- resourceID=self._resourceID,
- )
- self._syncTokenRevision = None
-
-
- @classproperty
- def _deleteSyncTokenQuery(cls):
- """
- DAL query to remove all child revision information. The revision for the collection
- itself is not touched.
- """
- rev = cls._revisionsSchema
- return Delete(
- From=rev,
- Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And
- (rev.RESOURCE_ID == Parameter("resourceID")).And
- (rev.COLLECTION_NAME == None)
- )
-
-
- @classproperty
- def _sharedRemovalQuery(cls):
- """
- DAL query to indicate a shared collection has been deleted.
- """
- rev = cls._revisionsSchema
- return Update(
- {
- rev.RESOURCE_ID: None,
- rev.REVISION: schema.REVISION_SEQ,
- rev.DELETED: True
- },
- Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
- rev.RESOURCE_ID == Parameter("resourceID")).And(
- rev.RESOURCE_NAME == None)
- )
-
-
- @classproperty
- def _unsharedRemovalQuery(cls):
- """
- DAL query to indicate an owned collection has been deleted.
- """
- rev = cls._revisionsSchema
- return Update(
- {
- rev.RESOURCE_ID: None,
- rev.REVISION: schema.REVISION_SEQ,
- rev.DELETED: True
- },
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
- rev.RESOURCE_NAME == None),
- )
-
-
- @inlineCallbacks
- def _deletedSyncToken(self, sharedRemoval=False):
- """
- When a collection is deleted we remove all the revision information for its child resources.
- We update the collection's sync token to indicate it has been deleted - that way a sync on
- the home collection can report the deletion of the collection.
-
- @param sharedRemoval: indicates whether the collection being removed is shared
- @type sharedRemoval: L{bool}
- """
- # Remove all child entries
- yield self._deleteSyncTokenQuery.on(self._txn,
- homeID=self._home._resourceID,
- resourceID=self._resourceID)
-
- # If this is a share being removed then we only mark this one specific
- # home/resource-id as being deleted. On the other hand, if it is a
- # non-shared collection, then we need to mark all collections
- # with the resource-id as being deleted to account for direct shares.
- if sharedRemoval:
- yield self._sharedRemovalQuery.on(self._txn,
- homeID=self._home._resourceID,
- resourceID=self._resourceID)
- else:
- yield self._unsharedRemovalQuery.on(self._txn,
- resourceID=self._resourceID)
- self._syncTokenRevision = None
-
-
- def _insertRevision(self, name):
- return self._changeRevision("insert", name)
-
-
- def _updateRevision(self, name):
- return self._changeRevision("update", name)
-
-
- def _deleteRevision(self, name):
- return self._changeRevision("delete", name)
-
-
- @classproperty
- def _deleteBumpTokenQuery(cls):
- rev = cls._revisionsSchema
- return Update(
- {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: True},
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
- rev.RESOURCE_NAME == Parameter("name")),
- Return=rev.REVISION
- )
-
-
- @classproperty
- def _updateBumpTokenQuery(cls):
- rev = cls._revisionsSchema
- return Update(
- {rev.REVISION: schema.REVISION_SEQ},
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
- rev.RESOURCE_NAME == Parameter("name")),
- Return=rev.REVISION
- )
-
-
- @classproperty
- def _insertFindPreviouslyNamedQuery(cls):
- rev = cls._revisionsSchema
- return Select(
- [rev.RESOURCE_ID],
- From=rev,
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
- rev.RESOURCE_NAME == Parameter("name"))
- )
-
-
- @classproperty
- def _updatePreviouslyNamedQuery(cls):
- rev = cls._revisionsSchema
- return Update(
- {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: False},
- Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
- rev.RESOURCE_NAME == Parameter("name")),
- Return=rev.REVISION
- )
-
-
- @classproperty
- def _completelyNewRevisionQuery(cls):
- rev = cls._revisionsSchema
- return Insert(
- {
- rev.HOME_RESOURCE_ID: Parameter("homeID"),
- rev.RESOURCE_ID: Parameter("resourceID"),
- rev.RESOURCE_NAME: Parameter("name"),
- rev.REVISION: schema.REVISION_SEQ,
- rev.DELETED: False
- },
- Return=rev.REVISION
- )
-
-
- @classproperty
- def _completelyNewDeletedRevisionQuery(cls):
- rev = cls._revisionsSchema
- return Insert(
- {
- rev.HOME_RESOURCE_ID: Parameter("homeID"),
- rev.RESOURCE_ID: Parameter("resourceID"),
- rev.RESOURCE_NAME: Parameter("name"),
- rev.REVISION: schema.REVISION_SEQ,
- rev.DELETED: True
- },
- Return=rev.REVISION
- )
-
-
- @inlineCallbacks
- def _changeRevision(self, action, name):
-
- # Need to handle the case where for some reason the revision entry is
- # actually missing. For a "delete" we don't care, for an "update" we
- # will turn it into an "insert".
- if action == "delete":
- rows = (
- yield self._deleteBumpTokenQuery.on(
- self._txn, resourceID=self._resourceID, name=name))
- if rows:
- self._syncTokenRevision = rows[0][0]
- else:
- self._syncTokenRevision = (
- yield self._completelyNewDeletedRevisionQuery.on(
- self._txn, homeID=self.ownerHome()._resourceID,
- resourceID=self._resourceID, name=name)
- )[0][0]
-
- elif action == "update":
- rows = (
- yield self._updateBumpTokenQuery.on(
- self._txn, resourceID=self._resourceID, name=name))
- if rows:
- self._syncTokenRevision = rows[0][0]
- else:
- self._syncTokenRevision = (
- yield self._completelyNewRevisionQuery.on(
- self._txn, homeID=self.ownerHome()._resourceID,
- resourceID=self._resourceID, name=name)
- )[0][0]
-
- elif action == "insert":
- # Note that an "insert" may happen for a resource that previously
- # existed and then was deleted. In that case an entry in the
- # REVISIONS table still exists so we have to detect that and do db
- # INSERT or UPDATE as appropriate
-
- found = bool((
- yield self._insertFindPreviouslyNamedQuery.on(
- self._txn, resourceID=self._resourceID, name=name)))
- if found:
- self._syncTokenRevision = (
- yield self._updatePreviouslyNamedQuery.on(
- self._txn, resourceID=self._resourceID, name=name)
- )[0][0]
- else:
- self._syncTokenRevision = (
- yield self._completelyNewRevisionQuery.on(
- self._txn, homeID=self.ownerHome()._resourceID,
- resourceID=self._resourceID, name=name)
- )[0][0]
- yield self._maybeNotify()
- returnValue(self._syncTokenRevision)
-
-
- def _maybeNotify(self):
- """
- Maybe notify changed. (Overridden in NotificationCollection.)
- """
- return succeed(None)
-
-
-
-SharingInvitation = namedtuple(
- "SharingInvitation",
- ["uid", "ownerUID", "ownerHomeID", "shareeUID", "shareeHomeID", "mode", "status", "summary"]
-)
-
-
-
-class SharingMixIn(object):
- """
- Common class for CommonHomeChild and AddressBookObject
- """
-
- @classproperty
- def _bindInsertQuery(cls, **kw):
- """
- DAL statement to create a bind entry that connects a collection to its
- home.
- """
- bind = cls._bindSchema
- return Insert({
- bind.HOME_RESOURCE_ID: Parameter("homeID"),
- bind.RESOURCE_ID: Parameter("resourceID"),
- bind.EXTERNAL_ID: Parameter("externalID"),
- bind.RESOURCE_NAME: Parameter("name"),
- bind.BIND_MODE: Parameter("mode"),
- bind.BIND_STATUS: Parameter("bindStatus"),
- bind.MESSAGE: Parameter("message"),
- })
-
-
- @classmethod
- def _updateBindColumnsQuery(cls, columnMap):
- bind = cls._bindSchema
- return Update(
- columnMap,
- Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
- bind.HOME_RESOURCE_ID == Parameter("homeID")),
- )
-
-
- @classproperty
- def _deleteBindForResourceIDAndHomeID(cls):
- bind = cls._bindSchema
- return Delete(
- From=bind,
- Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
- bind.HOME_RESOURCE_ID == Parameter("homeID")),
- )
-
-
- @classmethod
- def _bindFor(cls, condition):
- bind = cls._bindSchema
- columns = cls.bindColumns() + cls.additionalBindColumns()
- return Select(
- columns,
- From=bind,
- Where=condition
- )
-
-
- @classmethod
- def _bindInviteFor(cls, condition):
- home = cls._homeSchema
- bind = cls._bindSchema
- return Select(
- [
- home.OWNER_UID,
- bind.HOME_RESOURCE_ID,
- bind.RESOURCE_ID,
- bind.RESOURCE_NAME,
- bind.BIND_MODE,
- bind.BIND_STATUS,
- bind.MESSAGE,
- ],
- From=bind.join(home, on=(bind.HOME_RESOURCE_ID == home.RESOURCE_ID)),
- Where=condition
- )
-
-
- @classproperty
- def _sharedInvitationBindForResourceID(cls):
- bind = cls._bindSchema
- return cls._bindInviteFor(
- (bind.RESOURCE_ID == Parameter("resourceID")).And
- (bind.BIND_MODE != _BIND_MODE_OWN)
- )
-
-
- @classproperty
- def _acceptedBindForHomeID(cls):
- bind = cls._bindSchema
- return cls._bindFor((bind.HOME_RESOURCE_ID == Parameter("homeID"))
- .And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED))
-
-
- @classproperty
- def _bindForResourceIDAndHomeID(cls):
- """
- DAL query that looks up home bind rows by home child
- resource ID and home resource ID.
- """
- bind = cls._bindSchema
- return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
- .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
-
-
- @classproperty
- def _bindForExternalIDAndHomeID(cls):
- """
- DAL query that looks up home bind rows by home child
- resource ID and home resource ID.
- """
- bind = cls._bindSchema
- return cls._bindFor((bind.EXTERNAL_ID == Parameter("externalID"))
- .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
-
-
- @classproperty
- def _bindForNameAndHomeID(cls):
- """
- DAL query that looks up any bind rows by home child
- resource ID and home resource ID.
- """
- bind = cls._bindSchema
- return cls._bindFor((bind.RESOURCE_NAME == Parameter("name"))
- .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
-
-
- #
- # Higher level API
- #
- @inlineCallbacks
- def inviteUIDToShare(self, shareeUID, mode, summary=None, shareName=None):
- """
- Invite a user to share this collection - either create the share if it does not exist, or
- update the existing share with new values. Make sure a notification is sent as well.
-
- @param shareeUID: UID of the sharee
- @type shareeUID: C{str}
- @param mode: access mode
- @type mode: C{int}
- @param summary: share message
- @type summary: C{str}
- """
-
- # Look for existing invite and update its fields or create new one
- shareeView = yield self.shareeView(shareeUID)
- if shareeView is not None:
- status = _BIND_STATUS_INVITED if shareeView.shareStatus() in (_BIND_STATUS_DECLINED, _BIND_STATUS_INVALID) else None
- yield self.updateShare(shareeView, mode=mode, status=status, summary=summary)
- else:
- shareeView = yield self.createShare(shareeUID=shareeUID, mode=mode, summary=summary, shareName=shareName)
-
- # Check for external
- if shareeView.viewerHome().external():
- yield self._sendExternalInvite(shareeView)
- else:
- # Send invite notification
- yield self._sendInviteNotification(shareeView)
- returnValue(shareeView)
-
-
- @inlineCallbacks
- def directShareWithUser(self, shareeUID, shareName=None):
- """
- Create a direct share with the specified user. Note it is currently up to the app layer
- to enforce access control - this is not ideal as we really should have control of that in
- the store. Once we do, this API will need to verify that access is allowed for a direct share.
-
- NB no invitations are used with direct sharing.
-
- @param shareeUID: UID of the sharee
- @type shareeUID: C{str}
- """
-
- # Ignore if it already exists
- shareeView = yield self.shareeView(shareeUID)
- if shareeView is None:
- shareeView = yield self.createShare(shareeUID=shareeUID, mode=_BIND_MODE_DIRECT, shareName=shareName)
- yield shareeView.newShare()
-
- # Check for external
- if shareeView.viewerHome().external():
- yield self._sendExternalInvite(shareeView)
-
- returnValue(shareeView)
-
-
- @inlineCallbacks
- def uninviteUIDFromShare(self, shareeUID):
- """
- Remove a user from a share. Make sure a notification is sent as well.
-
- @param shareeUID: UID of the sharee
- @type shareeUID: C{str}
- """
- # Cancel invites - we'll just use whatever userid we are given
-
- shareeView = yield self.shareeView(shareeUID)
- if shareeView is not None:
- if shareeView.viewerHome().external():
- yield self._sendExternalUninvite(shareeView)
- else:
- # If current user state is accepted then we send an invite with the new state, otherwise
- # we cancel any existing invites for the user
- if not shareeView.direct():
- if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
- yield self._removeInviteNotification(shareeView)
- else:
- yield self._sendInviteNotification(shareeView, notificationState=_BIND_STATUS_DELETED)
-
- # Remove the bind
- yield self.removeShare(shareeView)
-
-
- @inlineCallbacks
- def acceptShare(self, summary=None):
- """
- This share is being accepted.
- """
-
- if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
- if self.external():
- yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
- ownerView = yield self.ownerView()
- yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
- yield self.newShare(displayname=summary)
- if not ownerView.external():
- yield self._sendReplyNotification(ownerView, summary)
-
-
- @inlineCallbacks
- def declineShare(self):
- """
- This share is being declined.
- """
-
- if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
- if self.external():
- yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
- ownerView = yield self.ownerView()
- yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
- if not ownerView.external():
- yield self._sendReplyNotification(ownerView)
-
-
- @inlineCallbacks
- def deleteShare(self):
- """
- This share is being deleted (by the sharee) - either decline or remove (for direct shares).
- """
-
- ownerView = yield self.ownerView()
- if self.direct():
- yield ownerView.removeShare(self)
- if ownerView.external():
- yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
- else:
- yield self.declineShare()
-
-
- @inlineCallbacks
- def ownerDeleteShare(self):
- """
- This share is being deleted (by the owner) - either decline or remove (for direct shares).
- """
-
- # Change status on store object
- yield self.setShared(False)
-
- # Remove all sharees (direct and invited)
- for invitation in (yield self.sharingInvites()):
- yield self.uninviteUIDFromShare(invitation.shareeUID)
-
-
- def newShare(self, displayname=None):
- """
- Override in derived classes to do any specific operations needed when a share
- is first accepted.
- """
- return succeed(None)
-
-
- @inlineCallbacks
- def allInvitations(self):
- """
- Get list of all invitations (non-direct) to this object.
- """
- invitations = yield self.sharingInvites()
-
- # remove direct shares as those are not "real" invitations
- invitations = filter(lambda x: x.mode != _BIND_MODE_DIRECT, invitations)
- invitations.sort(key=lambda invitation: invitation.shareeUID)
- returnValue(invitations)
-
-
- @inlineCallbacks
- def _sendInviteNotification(self, shareeView, notificationState=None):
- """
- Called on the owner's resource.
- """
- # When deleting, the message is the sharee's display name
- displayname = shareeView.shareMessage()
- if notificationState == _BIND_STATUS_DELETED:
- displayname = str(shareeView.properties().get(PropertyName.fromElement(element.DisplayName), displayname))
-
- notificationtype = {
- "notification-type": "invite-notification",
- "shared-type": shareeView.sharedResourceType(),
- }
- notificationdata = {
- "notification-type": "invite-notification",
- "shared-type": shareeView.sharedResourceType(),
- "dtstamp": DateTime.getNowUTC().getText(),
- "owner": shareeView.ownerHome().uid(),
- "sharee": shareeView.viewerHome().uid(),
- "uid": shareeView.shareUID(),
- "status": shareeView.shareStatus() if notificationState is None else notificationState,
- "access": (yield shareeView.effectiveShareMode()),
- "ownerName": self.shareName(),
- "summary": displayname,
- }
- if hasattr(self, "getSupportedComponents"):
- notificationdata["supported-components"] = self.getSupportedComponents()
-
- # Add to sharee's collection
- notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid())
- yield notifications.writeNotificationObject(shareeView.shareUID(), notificationtype, notificationdata)
-
-
- @inlineCallbacks
- def _sendReplyNotification(self, ownerView, summary=None):
- """
- Create a reply notification based on the current state of this shared resource.
- """
-
- # Generate invite XML
- notificationUID = "%s-reply" % (self.shareUID(),)
-
- notificationtype = {
- "notification-type": "invite-reply",
- "shared-type": self.sharedResourceType(),
- }
-
- notificationdata = {
- "notification-type": "invite-reply",
- "shared-type": self.sharedResourceType(),
- "dtstamp": DateTime.getNowUTC().getText(),
- "owner": self.ownerHome().uid(),
- "sharee": self.viewerHome().uid(),
- "status": self.shareStatus(),
- "ownerName": ownerView.shareName(),
- "in-reply-to": self.shareUID(),
- "summary": summary,
- }
-
- # Add to owner notification collection
- notifications = yield self._txn.notificationsWithUID(self.ownerHome().uid())
- yield notifications.writeNotificationObject(notificationUID, notificationtype, notificationdata)
-
-
- @inlineCallbacks
- def _removeInviteNotification(self, shareeView):
- """
- Called on the owner's resource.
- """
-
- # Remove from sharee's collection
- notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid())
- yield notifications.removeNotificationObjectWithUID(shareeView.shareUID())
-
-
- #
- # External/cross-pod API
- #
- @inlineCallbacks
- def _sendExternalInvite(self, shareeView):
-
- yield self._txn.store().conduit.send_shareinvite(
- self._txn,
- shareeView.ownerHome()._homeType,
- shareeView.ownerHome().uid(),
- self.id(),
- self.shareName(),
- shareeView.viewerHome().uid(),
- shareeView.shareUID(),
- shareeView.shareMode(),
- shareeView.shareMessage(),
- self.getInviteCopyProperties(),
- supported_components=self.getSupportedComponents() if hasattr(self, "getSupportedComponents") else None,
- )
-
-
- @inlineCallbacks
- def _sendExternalUninvite(self, shareeView):
-
- yield self._txn.store().conduit.send_shareuninvite(
- self._txn,
- shareeView.ownerHome()._homeType,
- shareeView.ownerHome().uid(),
- self.id(),
- shareeView.viewerHome().uid(),
- shareeView.shareUID(),
- )
-
-
- @inlineCallbacks
- def _replyExternalInvite(self, status, summary=None):
-
- yield self._txn.store().conduit.send_sharereply(
- self._txn,
- self.viewerHome()._homeType,
- self.ownerHome().uid(),
- self.viewerHome().uid(),
- self.shareUID(),
- status,
- summary,
- )
-
-
- #
- # Lower level API
- #
- @inlineCallbacks
- def ownerView(self):
- """
- Return the owner resource counterpart of this shared resource.
-
- Note we have to play a trick with the property store to coerce it to match
- the per-user properties for the owner.
- """
- # Get the child of the owner home that has the same resource id as the owned one
- ownerView = yield self.ownerHome().childWithID(self.id())
- returnValue(ownerView)
-
-
- @inlineCallbacks
- def shareeView(self, shareeUID):
- """
- Return the shared resource counterpart of this owned resource for the specified sharee.
-
- Note we have to play a trick with the property store to coerce it to match
- the per-user properties for the sharee.
- """
-
- # Never return the owner's own resource
- if self._home.uid() == shareeUID:
- returnValue(None)
-
- # Get the child of the sharee home that has the same resource id as the owned one
- shareeHome = yield self._txn.homeWithUID(self._home._homeType, shareeUID, authzUID=shareeUID)
- shareeView = (yield shareeHome.allChildWithID(self.id())) if shareeHome is not None else None
- returnValue(shareeView)
-
-
- @inlineCallbacks
- def shareWithUID(self, shareeUID, mode, status=None, summary=None, shareName=None):
- """
- Share this (owned) L{CommonHomeChild} with another principal.
-
- @param shareeUID: The UID of the sharee.
- @type: L{str}
-
- @param mode: The sharing mode; L{_BIND_MODE_READ} or
- L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
- @type mode: L{str}
-
- @param status: The sharing status; L{_BIND_STATUS_INVITED} or
- L{_BIND_STATUS_ACCEPTED}
- @type: L{str}
-
- @param summary: The proposed message to go along with the share, which
- will be used as the default display name.
- @type: L{str}
-
- @return: the name of the shared calendar in the new calendar home.
- @rtype: L{str}
- """
- shareeHome = yield self._txn.calendarHomeWithUID(shareeUID, create=True)
- returnValue(
- (yield self.shareWith(shareeHome, mode, status, summary, shareName))
- )
-
-
- @inlineCallbacks
- def shareWith(self, shareeHome, mode, status=None, summary=None, shareName=None):
- """
- Share this (owned) L{CommonHomeChild} with another home.
-
- @param shareeHome: The home of the sharee.
- @type: L{CommonHome}
-
- @param mode: The sharing mode; L{_BIND_MODE_READ} or
- L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
- @type: L{str}
-
- @param status: The sharing status; L{_BIND_STATUS_INVITED} or
- L{_BIND_STATUS_ACCEPTED}
- @type: L{str}
-
- @param summary: The proposed message to go along with the share, which
- will be used as the default display name.
- @type: L{str}
-
- @param shareName: The proposed name of the new share.
- @type: L{str}
-
- @return: the name of the shared calendar in the new calendar home.
- @rtype: L{str}
- """
-
- if status is None:
- status = _BIND_STATUS_ACCEPTED
-
- @inlineCallbacks
- def doInsert(subt):
- newName = shareName if shareName is not None else self.newShareName()
- yield self._bindInsertQuery.on(
- subt,
- homeID=shareeHome._resourceID,
- resourceID=self._resourceID,
- externalID=self._externalID,
- name=newName,
- mode=mode,
- bindStatus=status,
- message=summary
- )
- returnValue(newName)
- try:
- bindName = yield self._txn.subtransaction(doInsert)
- except AllRetriesFailed:
- # FIXME: catch more specific exception
- child = yield shareeHome.allChildWithID(self._resourceID)
- yield self.updateShare(
- child, mode=mode, status=status,
- summary=summary
- )
- bindName = child._name
- else:
- if status == _BIND_STATUS_ACCEPTED:
- shareeView = yield shareeHome.anyObjectWithShareUID(bindName)
- yield shareeView._initSyncToken()
- yield shareeView._initBindRevision()
-
- # Mark this as shared
- yield self.setShared(True)
-
- # Must send notification to ensure cache invalidation occurs
- yield self.notifyPropertyChanged()
- yield shareeHome.notifyChanged()
-
- returnValue(bindName)
-
-
- @inlineCallbacks
- def createShare(self, shareeUID, mode, summary=None, shareName=None):
- """
- Create a new shared resource. If the mode is direct, the share is created in accepted state,
- otherwise the share is created in invited state.
- """
- shareeHome = yield self._txn.homeWithUID(self.ownerHome()._homeType, shareeUID, create=True)
-
- yield self.shareWith(
- shareeHome,
- mode=mode,
- status=_BIND_STATUS_INVITED if mode != _BIND_MODE_DIRECT else _BIND_STATUS_ACCEPTED,
- summary=summary,
- shareName=shareName,
- )
- shareeView = yield self.shareeView(shareeUID)
- returnValue(shareeView)
-
-
- @inlineCallbacks
- def updateShare(self, shareeView, mode=None, status=None, summary=None):
- """
- Update share mode, status, and message for a home child shared with
- this (owned) L{CommonHomeChild}.
-
- @param shareeView: The sharee home child that shares this.
- @type shareeView: L{CommonHomeChild}
-
- @param mode: The sharing mode; L{_BIND_MODE_READ} or
- L{_BIND_MODE_WRITE} or None to not update
- @type mode: L{str}
-
- @param status: The sharing status; L{_BIND_STATUS_INVITED} or
- L{_BIND_STATUS_ACCEPTED} or L{_BIND_STATUS_DECLINED} or
- L{_BIND_STATUS_INVALID} or None to not update
- @type status: L{str}
-
- @param summary: The proposed message to go along with the share, which
- will be used as the default display name, or None to not update
- @type summary: L{str}
- """
- # TODO: raise a nice exception if shareeView is not, in fact, a shared
- # version of this same L{CommonHomeChild}
-
- # remove None parameters, and substitute None for empty string
- bind = self._bindSchema
- columnMap = {}
- if mode != None and mode != shareeView._bindMode:
- columnMap[bind.BIND_MODE] = mode
- if status != None and status != shareeView._bindStatus:
- columnMap[bind.BIND_STATUS] = status
- if summary != None and summary != shareeView._bindMessage:
- columnMap[bind.MESSAGE] = summary
-
- if columnMap:
-
- # Count accepted
- if bind.BIND_STATUS in columnMap:
- previouslyAcceptedCount = yield shareeView._previousAcceptCount()
-
- yield self._updateBindColumnsQuery(columnMap).on(
- self._txn,
- resourceID=self._resourceID, homeID=shareeView._home._resourceID
- )
-
- # Update affected attributes
- if bind.BIND_MODE in columnMap:
- shareeView._bindMode = columnMap[bind.BIND_MODE]
-
- if bind.BIND_STATUS in columnMap:
- shareeView._bindStatus = columnMap[bind.BIND_STATUS]
- yield shareeView._changedStatus(previouslyAcceptedCount)
-
- if bind.MESSAGE in columnMap:
- shareeView._bindMessage = columnMap[bind.MESSAGE]
-
- yield shareeView.invalidateQueryCache()
-
- # Must send notification to ensure cache invalidation occurs
- yield self.notifyPropertyChanged()
- yield shareeView.viewerHome().notifyChanged()
-
-
- def _previousAcceptCount(self):
- return succeed(1)
-
-
- @inlineCallbacks
- def _changedStatus(self, previouslyAcceptedCount):
- if self._bindStatus == _BIND_STATUS_ACCEPTED:
- yield self._initSyncToken()
- yield self._initBindRevision()
- self._home._children[self._name] = self
- self._home._children[self._resourceID] = self
- elif self._bindStatus in (_BIND_STATUS_INVITED, _BIND_STATUS_DECLINED):
- yield self._deletedSyncToken(sharedRemoval=True)
- self._home._children.pop(self._name, None)
- self._home._children.pop(self._resourceID, None)
-
-
- @inlineCallbacks
- def removeShare(self, shareeView):
- """
- Remove the shared version of this (owned) L{CommonHomeChild} from the
- referenced L{CommonHome}.
-
- @see: L{CommonHomeChild.shareWith}
-
- @param shareeView: The shared resource being removed.
-
- @return: a L{Deferred} which will fire with the previous shareUID
- """
-
- # remove sync tokens
- shareeHome = shareeView.viewerHome()
- yield shareeView._deletedSyncToken(sharedRemoval=True)
- shareeHome._children.pop(shareeView._name, None)
- shareeHome._children.pop(shareeView._resourceID, None)
-
- # Must send notification to ensure cache invalidation occurs
- yield self.notifyPropertyChanged()
- yield shareeHome.notifyChanged()
-
- # delete binds including invites
- yield self._deleteBindForResourceIDAndHomeID.on(
- self._txn,
- resourceID=self._resourceID,
- homeID=shareeHome._resourceID,
- )
-
- yield shareeView.invalidateQueryCache()
-
-
- @inlineCallbacks
- def unshare(self):
- """
- Unshares a collection, regardless of which "direction" it was shared.
- """
- if self.owned():
- # This collection may be shared to others
- invites = yield self.sharingInvites()
- for invite in invites:
- shareeView = yield self.shareeView(invite.shareeUID)
- yield self.removeShare(shareeView)
- else:
- # This collection is shared to me
- ownerView = yield self.ownerView()
- yield ownerView.removeShare(self)
-
-
- @inlineCallbacks
- def sharingInvites(self):
- """
- Retrieve the list of all L{SharingInvitation}s for this L{CommonHomeChild}, irrespective of mode.
-
- @return: L{SharingInvitation} objects
- @rtype: a L{Deferred} which fires with a L{list} of L{SharingInvitation}s.
- """
- if not self.owned():
- returnValue([])
-
- # get all accepted binds
- invitedRows = yield self._sharedInvitationBindForResourceID.on(
- self._txn, resourceID=self._resourceID, homeID=self._home._resourceID
- )
-
- result = []
- for homeUID, homeRID, _ignore_resourceID, resourceName, bindMode, bindStatus, bindMessage in invitedRows:
- invite = SharingInvitation(
- resourceName,
- self.ownerHome().name(),
- self.ownerHome().id(),
- homeUID,
- homeRID,
- bindMode,
- bindStatus,
- bindMessage,
- )
- result.append(invite)
- returnValue(result)
-
-
- @inlineCallbacks
- def _initBindRevision(self):
- yield self.syncToken() # init self._syncTokenRevision if None
- self._bindRevision = self._syncTokenRevision
-
- bind = self._bindSchema
- yield self._updateBindColumnsQuery(
- {bind.BIND_REVISION : Parameter("revision"), }
- ).on(
- self._txn,
- revision=self._bindRevision,
- resourceID=self._resourceID,
- homeID=self.viewerHome()._resourceID,
- )
- yield self.invalidateQueryCache()
-
-
- def sharedResourceType(self):
- """
- The sharing resource type. Needs to be overridden by each type of resource that can be shared.
-
- @return: an identifier for the type of the share.
- @rtype: C{str}
- """
- return ""
-
-
- def newShareName(self):
- """
- Name used when creating a new share. By default this is a UUID.
- """
- return str(uuid4())
-
-
- def owned(self):
- """
- @see: L{ICalendar.owned}
- """
- return self._bindMode == _BIND_MODE_OWN
-
-
- def isShared(self):
- """
- For an owned collection indicate whether it is shared.
-
- @return: C{True} if shared, C{False} otherwise
- @rtype: C{bool}
- """
- return self.owned() and self._bindMessage == "shared"
-
-
- @inlineCallbacks
- def setShared(self, shared):
- """
- Set an owned collection to shared or unshared state. Technically this is not useful as "shared"
- really means it has invitees, but the current sharing spec supports a notion of a shared collection
- that has not yet had invitees added. For the time being we will support that option by using a new
- MESSAGE value to indicate an owned collection that is "shared".
-
- @param shared: whether or not the owned collection is "shared"
- @type shared: C{bool}
- """
- assert self.owned(), "Cannot change share mode on a shared collection"
-
- # Only if change is needed
- newMessage = "shared" if shared else None
- if self._bindMessage == newMessage:
- returnValue(None)
-
- self._bindMessage = newMessage
-
- bind = self._bindSchema
- yield Update(
- {bind.MESSAGE: self._bindMessage},
- Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
- bind.HOME_RESOURCE_ID == Parameter("homeID")),
- ).on(self._txn, resourceID=self._resourceID, homeID=self.viewerHome()._resourceID)
-
- yield self.invalidateQueryCache()
- yield self.notifyPropertyChanged()
-
-
- def direct(self):
- """
- Is this a "direct" share?
-
- @return: a boolean indicating whether it's direct.
- """
- return self._bindMode == _BIND_MODE_DIRECT
-
-
- def indirect(self):
- """
- Is this an "indirect" share?
-
- @return: a boolean indicating whether it's indirect.
- """
- return self._bindMode == _BIND_MODE_INDIRECT
-
-
- def shareUID(self):
- """
- @see: L{ICalendar.shareUID}
- """
- return self.name()
-
-
- def shareMode(self):
- """
- @see: L{ICalendar.shareMode}
- """
- return self._bindMode
-
-
- def _effectiveShareMode(self, bindMode, viewerUID, txn):
- """
- Get the effective share mode without a calendar object
- """
- return bindMode
-
-
- def effectiveShareMode(self):
- """
- @see: L{ICalendar.shareMode}
- """
- return self._bindMode
-
-
- def shareName(self):
- """
- This is a path-like name for the resource within the home being shared. For object resource
- shares this will be a combination of the L{CommonHomeChild} name and the L{CommonObjectResource}
- name. Otherwise it is just the L{CommonHomeChild} name. This is needed to expose a value to the
- app-layer such that it can construct a URI for the actual WebDAV resource being shared.
- """
- name = self.name()
- if self.sharedResourceType() == "group":
- name = self.parentCollection().name() + "/" + name
- return name
-
-
- def shareStatus(self):
- """
- @see: L{ICalendar.shareStatus}
- """
- return self._bindStatus
-
-
- def accepted(self):
- """
- @see: L{ICalendar.shareStatus}
- """
- return self._bindStatus == _BIND_STATUS_ACCEPTED
-
-
- def shareMessage(self):
- """
- @see: L{ICalendar.shareMessage}
- """
- return self._bindMessage
-
-
- def getInviteCopyProperties(self):
- """
- Get a dictionary of property name/values (as strings) for properties that are shadowable and
- need to be copied to a sharee's collection when an external (cross-pod) share is created.
- Sub-classes should override to expose the properties they care about.
- """
- return {}
-
-
- def setInviteCopyProperties(self, props):
- """
- Copy a set of shadowable properties (as name/value strings) onto this shared resource when
- a cross-pod invite is processed. Sub-classes should override to expose the properties they
- care about.
- """
- pass
-
-
- @classmethod
- def metadataColumns(cls):
- """
- Return a list of column names for retrieval of metadata. This allows
- different child classes to have their own type specific data, but still make use of the
- common base logic.
- """
-
- # Common behavior is to have created and modified
-
- return (
- cls._homeChildMetaDataSchema.CREATED,
- cls._homeChildMetaDataSchema.MODIFIED,
- )
-
-
- @classmethod
- def metadataAttributes(cls):
- """
- Return a list of attribute names for retrieval of metadata. This allows
- different child classes to have their own type specific data, but still make use of the
- common base logic.
- """
-
- # Common behavior is to have created and modified
-
- return (
- "_created",
- "_modified",
- )
-
-
- @classmethod
- def bindColumns(cls):
- """
- Return a list of column names for retrieval during creation. This allows
- different child classes to have their own type specific data, but still make use of the
- common base logic.
- """
-
- return (
- cls._bindSchema.BIND_MODE,
- cls._bindSchema.HOME_RESOURCE_ID,
- cls._bindSchema.RESOURCE_ID,
- cls._bindSchema.EXTERNAL_ID,
- cls._bindSchema.RESOURCE_NAME,
- cls._bindSchema.BIND_STATUS,
- cls._bindSchema.BIND_REVISION,
- cls._bindSchema.MESSAGE
- )
-
-
- @classmethod
- def bindAttributes(cls):
- """
- Return a list of attribute names for retrieval during creation. This allows
- different child classes to have their own type specific data, but still make use of the
- common base logic.
- """
-
- return (
- "_bindMode",
- "_homeResourceID",
- "_resourceID",
- "_externalID",
- "_name",
- "_bindStatus",
- "_bindRevision",
- "_bindMessage",
- )
-
- bindColumnCount = 8
-
- @classmethod
- def additionalBindColumns(cls):
- """
- Return a list of column names for retrieval during creation. This allows
- different child classes to have their own type specific data, but still make use of the
- common base logic.
- """
-
- return ()
-
-
- @classmethod
- def additionalBindAttributes(cls):
- """
- Return a list of attribute names for retrieval during creation. This allows
- different child classes to have their own type specific data, but still make use of the
- common base logic.
- """
-
- return ()
-
-
- @classproperty
- def _childrenAndMetadataForHomeID(cls):
- bind = cls._bindSchema
- child = cls._homeChildSchema
- childMetaData = cls._homeChildMetaDataSchema
-
- columns = cls.bindColumns() + cls.additionalBindColumns() + cls.metadataColumns()
- return Select(
- columns,
- From=child.join(
- bind, child.RESOURCE_ID == bind.RESOURCE_ID,
- 'left outer').join(
- childMetaData, childMetaData.RESOURCE_ID == bind.RESOURCE_ID,
- 'left outer'),
- Where=(bind.HOME_RESOURCE_ID == Parameter("homeID")).And(
- bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
- )
-
-
- @classmethod
- def _revisionsForResourceIDs(cls, resourceIDs):
- rev = cls._revisionsSchema
- return Select(
- [rev.RESOURCE_ID, Max(rev.REVISION)],
- From=rev,
- Where=rev.RESOURCE_ID.In(Parameter("resourceIDs", len(resourceIDs))).And(
- (rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
- GroupBy=rev.RESOURCE_ID
- )
-
-
- @inlineCallbacks
- def invalidateQueryCache(self):
- queryCacher = self._txn._queryCacher
- if queryCacher is not None:
- yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
- yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
- yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
- yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithExternalID(self._home._resourceID, self._externalID))
-
-
-
</del><span class="cx"> class CommonHomeChild(FancyEqMixin, Memoizable, _SharedSyncLogic, HomeChildBase, SharingMixIn):
</span><span class="cx"> """
</span><span class="cx"> Common ancestor class of AddressBooks and Calendars.
</span><span class="lines">@@ -5590,6 +2862,11 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> _externalClass = None
</span><ins>+ _homeRecordClass = None
+ _metadataRecordClass = None
+ _bindRecordClass = None
+ _bindHomeIDAttributeName = None
+ _bindResourceIDAttributeName = None
</ins><span class="cx"> _objectResourceClass = None
</span><span class="cx">
</span><span class="cx"> _bindSchema = None
</span><span class="lines">@@ -5623,7 +2900,7 @@
</span><span class="cx"> @rtype: L{CommonHomeChild}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- bindMode, _ignore_homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = bindData
</del><ins>+ _ignore_homeID, resourceID, name, bindMode, bindStatus, bindRevision, bindUID, bindMessage = bindData
</ins><span class="cx">
</span><span class="cx"> if ownerHome is None:
</span><span class="cx"> if bindMode == _BIND_MODE_OWN:
</span><span class="lines">@@ -5634,7 +2911,7 @@
</span><span class="cx"> else:
</span><span class="cx"> ownerName = None
</span><span class="cx">
</span><del>- c = cls._externalClass if ownerHome.externalClass() else cls
</del><ins>+ c = cls._externalClass if ownerHome and ownerHome.externalClass() else cls
</ins><span class="cx"> child = c(
</span><span class="cx"> home=home,
</span><span class="cx"> name=name,
</span><span class="lines">@@ -5645,7 +2922,7 @@
</span><span class="cx"> message=bindMessage,
</span><span class="cx"> ownerHome=ownerHome,
</span><span class="cx"> ownerName=ownerName,
</span><del>- externalID=externalID,
</del><ins>+ bindUID=bindUID,
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> if additionalBindData:
</span><span class="lines">@@ -5658,7 +2935,7 @@
</span><span class="cx">
</span><span class="cx"> # We have to re-adjust the property store object to account for possible shared
</span><span class="cx"> # collections as previously we loaded them all as if they were owned
</span><del>- if propstore and bindMode != _BIND_MODE_OWN:
</del><ins>+ if ownerHome and propstore and bindMode != _BIND_MODE_OWN:
</ins><span class="cx"> propstore._setDefaultUserUID(ownerHome.uid())
</span><span class="cx"> yield child._loadPropertyStore(propstore)
</span><span class="cx">
</span><span class="lines">@@ -5667,10 +2944,10 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def _getDBData(cls, home, name, resourceID, externalID):
</del><ins>+ def _getDBData(cls, home, name, resourceID, bindUID):
</ins><span class="cx"> """
</span><span class="cx"> Given a set of identifying information, load the data rows for the object. Only one of
</span><del>- L{name}, L{resourceID} or L{externalID} is specified - others are C{None}.
</del><ins>+ L{name}, L{resourceID} or L{bindUID} is specified - others are C{None}.
</ins><span class="cx">
</span><span class="cx"> @param home: the parent home object
</span><span class="cx"> @type home: L{CommonHome}
</span><span class="lines">@@ -5678,8 +2955,8 @@
</span><span class="cx"> @type name: C{str}
</span><span class="cx"> @param resourceID: the resource ID
</span><span class="cx"> @type resourceID: C{int}
</span><del>- @param externalID: the resource ID of the external (cross-pod) referenced item
- @type externalID: C{int}
</del><ins>+ @param bindUID: the unique ID of the external (cross-pod) referenced item
+ @type bindUID: C{str}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> # Get the bind row data
</span><span class="lines">@@ -5692,8 +2969,8 @@
</span><span class="cx"> cacheKey = queryCacher.keyForObjectWithName(home._resourceID, name)
</span><span class="cx"> elif resourceID:
</span><span class="cx"> cacheKey = queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID)
</span><del>- elif externalID:
- cacheKey = queryCacher.keyForObjectWithExternalID(home._resourceID, externalID)
</del><ins>+ elif bindUID:
+ cacheKey = queryCacher.keyForObjectWithBindUID(home._resourceID, bindUID)
</ins><span class="cx"> row = yield queryCacher.get(cacheKey)
</span><span class="cx">
</span><span class="cx"> if row is None:
</span><span class="lines">@@ -5702,8 +2979,8 @@
</span><span class="cx"> rows = yield cls._bindForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)
</span><span class="cx"> elif resourceID:
</span><span class="cx"> rows = yield cls._bindForResourceIDAndHomeID.on(home._txn, resourceID=resourceID, homeID=home._resourceID)
</span><del>- elif externalID:
- rows = yield cls._bindForExternalIDAndHomeID.on(home._txn, externalID=externalID, homeID=home._resourceID)
</del><ins>+ elif bindUID:
+ rows = yield cls._bindForBindUIDAndHomeID.on(home._txn, bindUID=bindUID, homeID=home._resourceID)
</ins><span class="cx"> row = rows[0] if rows else None
</span><span class="cx">
</span><span class="cx"> if not row:
</span><span class="lines">@@ -5713,7 +2990,7 @@
</span><span class="cx"> # Cache the result
</span><span class="cx"> queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithName(home._resourceID, name), row)
</span><span class="cx"> queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID), row)
</span><del>- queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithExternalID(home._resourceID, externalID), row)
</del><ins>+ queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithBindUID(home._resourceID, bindUID), row)
</ins><span class="cx">
</span><span class="cx"> bindData = row[:cls.bindColumnCount]
</span><span class="cx"> additionalBindData = row[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
</span><span class="lines">@@ -5736,15 +3013,15 @@
</span><span class="cx"> returnValue((bindData, additionalBindData, metadataData,))
</span><span class="cx">
</span><span class="cx">
</span><del>- def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, externalID=None):
</del><ins>+ def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, bindUID=None):
</ins><span class="cx">
</span><span class="cx"> self._home = home
</span><span class="cx"> self._name = name
</span><span class="cx"> self._resourceID = resourceID
</span><del>- self._externalID = externalID
</del><span class="cx"> self._bindMode = mode
</span><span class="cx"> self._bindStatus = status
</span><span class="cx"> self._bindRevision = revision
</span><ins>+ self._bindUID = bindUID
</ins><span class="cx"> self._bindMessage = message
</span><span class="cx"> self._ownerHome = home if ownerHome is None else ownerHome
</span><span class="cx"> self._ownerName = name if ownerName is None else ownerName
</span><span class="lines">@@ -5808,9 +3085,10 @@
</span><span class="cx"> # Load from the main table first
</span><span class="cx"> dataRows = (yield cls._childrenAndMetadataForHomeID.on(home._txn, homeID=home._resourceID))
</span><span class="cx">
</span><ins>+ resourceID_index = cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)
</ins><span class="cx"> if dataRows:
</span><span class="cx"> # Get property stores
</span><del>- childResourceIDs = [dataRow[2] for dataRow in dataRows]
</del><ins>+ childResourceIDs = [dataRow[resourceID_index] for dataRow in dataRows]
</ins><span class="cx">
</span><span class="cx"> propertyStores = yield PropertyStore.forMultipleResourcesWithResourceIDs(
</span><span class="cx"> home.uid(), None, None, home._txn, childResourceIDs
</span><span class="lines">@@ -5823,7 +3101,7 @@
</span><span class="cx"> # Create the actual objects merging in properties
</span><span class="cx"> for dataRow in dataRows:
</span><span class="cx"> bindData = dataRow[:cls.bindColumnCount]
</span><del>- resourceID = bindData[cls.bindColumns().index(cls._bindSchema.RESOURCE_ID)]
</del><ins>+ resourceID = bindData[resourceID_index]
</ins><span class="cx"> additionalBindData = dataRow[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
</span><span class="cx"> metadataData = dataRow[cls.bindColumnCount + len(cls.additionalBindColumns()):]
</span><span class="cx"> propstore = propertyStores.get(resourceID, None)
</span><span class="lines">@@ -5846,13 +3124,13 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- def objectWithExternalID(cls, home, externalID, accepted=True):
- return cls.objectWith(home, externalID=externalID, accepted=accepted)
</del><ins>+ def objectWithBindUID(cls, home, bindUID, accepted=True):
+ return cls.objectWith(home, bindUID=bindUID, accepted=accepted)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def objectWith(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
</del><ins>+ def objectWith(cls, home, name=None, resourceID=None, bindUID=None, accepted=True):
</ins><span class="cx"> """
</span><span class="cx"> Create the object using one of the specified arguments as the key to load it. One
</span><span class="cx"> and only one of the keyword arguments must be set.
</span><span class="lines">@@ -5872,7 +3150,7 @@
</span><span class="cx"> @rtype: C{CommonHomeChild}
</span><span class="cx"> """
</span><span class="cx">
</span><del>- dbData = yield cls._getDBData(home, name, resourceID, externalID)
</del><ins>+ dbData = yield cls._getDBData(home, name, resourceID, bindUID)
</ins><span class="cx"> if dbData is None:
</span><span class="cx"> returnValue(None)
</span><span class="cx"> bindData, additionalBindData, metadataData = dbData
</span><span class="lines">@@ -5909,7 +3187,7 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def create(cls, home, name, externalID=None):
</del><ins>+ def create(cls, home, name, bindUID=None):
</ins><span class="cx">
</span><span class="cx"> if (yield cls._bindForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)):
</span><span class="cx"> raise HomeChildNameAlreadyExistsError(name)
</span><span class="lines">@@ -5924,7 +3202,7 @@
</span><span class="cx"> _created, _modified = (yield cls._insertHomeChildMetaData.on(home._txn, resourceID=resourceID))[0]
</span><span class="cx"> # Bind table needs entry
</span><span class="cx"> yield cls._bindInsertQuery.on(
</span><del>- home._txn, homeID=home._resourceID, resourceID=resourceID, externalID=externalID,
</del><ins>+ home._txn, homeID=home._resourceID, resourceID=resourceID, bindUID=bindUID,
</ins><span class="cx"> name=name, mode=_BIND_MODE_OWN, bindStatus=_BIND_STATUS_ACCEPTED,
</span><span class="cx"> message=None,
</span><span class="cx"> )
</span><span class="lines">@@ -5961,15 +3239,6 @@
</span><span class="cx"> return self._resourceID
</span><span class="cx">
</span><span class="cx">
</span><del>- def external_id(self):
- """
- Retrieve the external store identifier for this collection.
-
- @return: a string.
- """
- return self._externalID
-
-
</del><span class="cx"> def external(self):
</span><span class="cx"> """
</span><span class="cx"> Is this an external home.
</span><span class="lines">@@ -5988,7 +3257,7 @@
</span><span class="cx"> return self.ownerHome().externalClass()
</span><span class="cx">
</span><span class="cx">
</span><del>- def externalize(self):
</del><ins>+ def serialize(self):
</ins><span class="cx"> """
</span><span class="cx"> Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
</span><span class="cx"> and reconstituted at the other end. Note that the other end may have a different schema so
</span><span class="lines">@@ -6003,9 +3272,9 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def internalize(cls, parent, mapping):
</del><ins>+ def deserialize(cls, parent, mapping):
</ins><span class="cx"> """
</span><del>- Given a mapping generated by L{externalize}, convert the values into an array of database
</del><ins>+ Given a mapping generated by L{serialize}, convert the values into an array of database
</ins><span class="cx"> like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
</span><span class="cx"> Note that there may be a schema mismatch with the external data, so treat missing items as
</span><span class="cx"> C{None} and ignore extra items.
</span><span class="lines">@@ -6117,6 +3386,13 @@
</span><span class="cx"> yield self._home.notifyChanged()
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def purge(self):
+ """
+ Do a "silent" removal of this object resource.
+ """
+ return self.remove()
+
+
</ins><span class="cx"> def ownerHome(self):
</span><span class="cx"> """
</span><span class="cx"> @see: L{ICalendar.ownerCalendarHome}
</span><span class="lines">@@ -7238,7 +4514,7 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><del>- def externalize(self):
</del><ins>+ def serialize(self):
</ins><span class="cx"> """
</span><span class="cx"> Create a dictionary mapping key attributes so this object can be sent over a cross-pod call
</span><span class="cx"> and reconstituted at the other end. Note that the other end may have a different schema so
</span><span class="lines">@@ -7249,9 +4525,9 @@
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def internalize(cls, parent, mapping):
</del><ins>+ def deserialize(cls, parent, mapping):
</ins><span class="cx"> """
</span><del>- Given a mapping generated by L{externalize}, convert the values into an array of database
</del><ins>+ Given a mapping generated by L{serialize}, convert the values into an array of database
</ins><span class="cx"> like items that conforms to the ordering of L{_allColumns} so it can be fed into L{makeClass}.
</span><span class="cx"> Note that there may be a schema mismatch with the external data, so treat missing items as
</span><span class="cx"> C{None} and ignore extra items.
</span><span class="lines">@@ -7440,6 +4716,13 @@
</span><span class="cx"> self._cachedComponent = None
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def purge(self):
+ """
+ Do a "silent" removal of this object resource.
+ """
+ return self.remove()
+
+
</ins><span class="cx"> def removeNotifyCategory(self):
</span><span class="cx"> """
</span><span class="cx"> Indicates what category to use when determining the priority of push
</span><span class="lines">@@ -7506,1045 +4789,3 @@
</span><span class="cx"> raise ConcurrentModification()
</span><span class="cx"> else:
</span><span class="cx"> returnValue(self._textData)
</span><del>-
-
-
-class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
- log = Logger()
-
- implements(INotificationCollection)
-
- compareAttributes = (
- "_uid",
- "_resourceID",
- )
-
- _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
- _homeSchema = schema.NOTIFICATION_HOME
-
-
- def __init__(self, txn, uid, resourceID):
-
- self._txn = txn
- self._uid = uid
- self._resourceID = resourceID
- self._dataVersion = None
- self._notifications = {}
- self._notificationNames = None
- self._syncTokenRevision = None
-
- # Make sure we have push notifications setup to push on this collection
- # as well as the home it is in
- self._notifiers = dict([(factory_name, factory.newNotifier(self),) for factory_name, factory in txn._notifierFactories.items()])
-
- _resourceIDFromUIDQuery = Select(
- [_homeSchema.RESOURCE_ID], From=_homeSchema,
- Where=_homeSchema.OWNER_UID == Parameter("uid"))
-
- _UIDFromResourceIDQuery = Select(
- [_homeSchema.OWNER_UID], From=_homeSchema,
- Where=_homeSchema.RESOURCE_ID == Parameter("rid"))
-
- _provisionNewNotificationsQuery = Insert(
- {_homeSchema.OWNER_UID: Parameter("uid")},
- Return=_homeSchema.RESOURCE_ID
- )
-
-
- @property
- def _home(self):
- """
- L{NotificationCollection} serves as its own C{_home} for the purposes of
- working with L{_SharedSyncLogic}.
- """
- return self
-
-
- @classmethod
- @inlineCallbacks
- def notificationsWithUID(cls, txn, uid, create):
- """
- @param uid: I'm going to assume uid is utf-8 encoded bytes
- """
- rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
-
- if rows:
- resourceID = rows[0][0]
- created = False
- elif create:
- # Determine if the user is local or external
- record = yield txn.directoryService().recordWithUID(uid.decode("utf-8"))
- if record is None:
- raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
-
- state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
- if state == _HOME_STATUS_EXTERNAL:
- raise RecordNotAllowedError("Cannot store notifications for external user: {}".format(uid))
-
- # Use savepoint so we can do a partial rollback if there is a race
- # condition where this row has already been inserted
- savepoint = SavepointAction("notificationsWithUID")
- yield savepoint.acquire(txn)
-
- try:
- resourceID = str((
- yield cls._provisionNewNotificationsQuery.on(txn, uid=uid)
- )[0][0])
- except Exception:
- # FIXME: Really want to trap the pg.DatabaseError but in a non-
- # DB specific manner
- yield savepoint.rollback(txn)
-
- # Retry the query - row may exist now, if not re-raise
- rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
- if rows:
- resourceID = rows[0][0]
- created = False
- else:
- raise
- else:
- created = True
- yield savepoint.release(txn)
- else:
- returnValue(None)
- collection = cls(txn, uid, resourceID)
- yield collection._loadPropertyStore()
- if created:
- yield collection._initSyncToken()
- yield collection.notifyChanged()
- returnValue(collection)
-
-
- @classmethod
- @inlineCallbacks
- def notificationsWithResourceID(cls, txn, rid):
- rows = yield cls._UIDFromResourceIDQuery.on(txn, rid=rid)
-
- if rows:
- uid = rows[0][0]
- result = (yield cls.notificationsWithUID(txn, uid, create=False))
- returnValue(result)
- else:
- returnValue(None)
-
-
- @inlineCallbacks
- def _loadPropertyStore(self):
- self._propertyStore = yield PropertyStore.load(
- self._uid,
- self._uid,
- None,
- self._txn,
- self._resourceID,
- notifyCallback=self.notifyChanged
- )
-
-
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
-
-
- def id(self):
- """
- Retrieve the store identifier for this collection.
-
- @return: store identifier.
- @rtype: C{int}
- """
- return self._resourceID
-
-
- @classproperty
- def _dataVersionQuery(cls):
- nh = cls._homeSchema
- return Select(
- [nh.DATAVERSION], From=nh,
- Where=nh.RESOURCE_ID == Parameter("resourceID")
- )
-
-
- @inlineCallbacks
- def dataVersion(self):
- if self._dataVersion is None:
- self._dataVersion = (yield self._dataVersionQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
- returnValue(self._dataVersion)
-
-
- def name(self):
- return "notification"
-
-
- def uid(self):
- return self._uid
-
-
- def owned(self):
- return True
-
-
- def ownerHome(self):
- return self._home
-
-
- def viewerHome(self):
- return self._home
-
-
- @inlineCallbacks
- def notificationObjects(self):
- results = (yield NotificationObject.loadAllObjects(self))
- for result in results:
- self._notifications[result.uid()] = result
- self._notificationNames = sorted([result.name() for result in results])
- returnValue(results)
-
- _notificationUIDsForHomeQuery = Select(
- [schema.NOTIFICATION.NOTIFICATION_UID], From=schema.NOTIFICATION,
- Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID ==
- Parameter("resourceID"))
-
-
- @inlineCallbacks
- def listNotificationObjects(self):
- if self._notificationNames is None:
- rows = yield self._notificationUIDsForHomeQuery.on(
- self._txn, resourceID=self._resourceID)
- self._notificationNames = sorted([row[0] for row in rows])
- returnValue(self._notificationNames)
-
-
- # used by _SharedSyncLogic.resourceNamesSinceRevision()
- def listObjectResources(self):
- return self.listNotificationObjects()
-
-
- def _nameToUID(self, name):
- """
- Based on the file-backed implementation, the 'name' is just uid +
- ".xml".
- """
- return name.rsplit(".", 1)[0]
-
-
- def notificationObjectWithName(self, name):
- return self.notificationObjectWithUID(self._nameToUID(name))
-
-
- @memoizedKey("uid", "_notifications")
- @inlineCallbacks
- def notificationObjectWithUID(self, uid):
- """
- Create an empty notification object first then have it initialize itself
- from the store.
- """
- no = NotificationObject(self, uid)
- no = (yield no.initFromStore())
- returnValue(no)
-
-
- @inlineCallbacks
- def writeNotificationObject(self, uid, notificationtype, notificationdata):
-
- inserting = False
- notificationObject = yield self.notificationObjectWithUID(uid)
- if notificationObject is None:
- notificationObject = NotificationObject(self, uid)
- inserting = True
- yield notificationObject.setData(uid, notificationtype, notificationdata, inserting=inserting)
- if inserting:
- yield self._insertRevision("%s.xml" % (uid,))
- if self._notificationNames is not None:
- self._notificationNames.append(notificationObject.uid())
- else:
- yield self._updateRevision("%s.xml" % (uid,))
- yield self.notifyChanged()
-
-
- def removeNotificationObjectWithName(self, name):
- if self._notificationNames is not None:
- self._notificationNames.remove(self._nameToUID(name))
- return self.removeNotificationObjectWithUID(self._nameToUID(name))
-
- _removeByUIDQuery = Delete(
- From=schema.NOTIFICATION,
- Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter("uid")).And(
- schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID
- == Parameter("resourceID")))
-
-
- @inlineCallbacks
- def removeNotificationObjectWithUID(self, uid):
- yield self._removeByUIDQuery.on(
- self._txn, uid=uid, resourceID=self._resourceID)
- self._notifications.pop(uid, None)
- yield self._deleteRevision("%s.xml" % (uid,))
- yield self.notifyChanged()
-
- _initSyncTokenQuery = Insert(
- {
- _revisionsSchema.HOME_RESOURCE_ID : Parameter("resourceID"),
- _revisionsSchema.RESOURCE_NAME : None,
- _revisionsSchema.REVISION : schema.REVISION_SEQ,
- _revisionsSchema.DELETED : False
- }, Return=_revisionsSchema.REVISION
- )
-
-
- @inlineCallbacks
- def _initSyncToken(self):
- self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
- self._txn, resourceID=self._resourceID))[0][0]
-
- _syncTokenQuery = Select(
- [Max(_revisionsSchema.REVISION)], From=_revisionsSchema,
- Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter("resourceID")
- )
-
-
- @inlineCallbacks
- def syncToken(self):
- if self._syncTokenRevision is None:
- self._syncTokenRevision = (
- yield self._syncTokenQuery.on(
- self._txn, resourceID=self._resourceID)
- )[0][0]
- if self._syncTokenRevision is None:
- self._syncTokenRevision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
- returnValue("%s_%s" % (self._resourceID, self._syncTokenRevision))
-
-
- def properties(self):
- return self._propertyStore
-
-
- def addNotifier(self, factory_name, notifier):
- if self._notifiers is None:
- self._notifiers = {}
- self._notifiers[factory_name] = notifier
-
-
- def getNotifier(self, factory_name):
- return self._notifiers.get(factory_name)
-
-
- def notifierID(self):
- return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, "%s/notification" % (self.ownerHome().uid(),),)
-
-
- def parentNotifierID(self):
- return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, "%s" % (self.ownerHome().uid(),),)
-
-
- @inlineCallbacks
- def notifyChanged(self, category=ChangeCategory.default):
- """
- Send notifications, change sync token and bump last modified because
- the resource has changed. We ensure we only do this once per object
- per transaction.
- """
- if self._txn.isNotifiedAlready(self):
- returnValue(None)
- self._txn.notificationAddedForObject(self)
-
- # Send notifications
- if self._notifiers:
- # cache notifiers run in post commit
- notifier = self._notifiers.get("cache", None)
- if notifier:
- self._txn.postCommit(notifier.notify)
- # push notifiers add their work items immediately
- notifier = self._notifiers.get("push", None)
- if notifier:
- yield notifier.notify(self._txn, priority=category.value)
-
- returnValue(None)
-
-
- @classproperty
- def _completelyNewRevisionQuery(cls):
- rev = cls._revisionsSchema
- return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
- # rev.RESOURCE_ID: Parameter("resourceID"),
- rev.RESOURCE_NAME: Parameter("name"),
- rev.REVISION: schema.REVISION_SEQ,
- rev.DELETED: False},
- Return=rev.REVISION)
-
-
- def _maybeNotify(self):
- """
- Emit a push notification after C{_changeRevision}.
- """
- return self.notifyChanged()
-
-
- @inlineCallbacks
- def remove(self):
- """
- Remove DB rows corresponding to this notification home.
- """
- # Delete NOTIFICATION rows
- no = schema.NOTIFICATION
- kwds = {"ResourceID": self._resourceID}
- yield Delete(
- From=no,
- Where=(
- no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")
- ),
- ).on(self._txn, **kwds)
-
- # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
- nh = schema.NOTIFICATION_HOME
- yield Delete(
- From=nh,
- Where=(
- nh.RESOURCE_ID == Parameter("ResourceID")
- ),
- ).on(self._txn, **kwds)
-
-
-
-class NotificationObject(FancyEqMixin, object):
- """
- This used to store XML data and an XML element for the type. But we are now switching it
- to use JSON internally. The app layer will convert that to XML and fill in the "blanks" as
- needed for the app.
- """
- log = Logger()
-
- implements(INotificationObject)
-
- compareAttributes = (
- "_resourceID",
- "_home",
- )
-
- _objectSchema = schema.NOTIFICATION
-
- def __init__(self, home, uid):
- self._home = home
- self._resourceID = None
- self._uid = uid
- self._md5 = None
- self._size = None
- self._created = None
- self._modified = None
- self._notificationType = None
- self._notificationData = None
-
-
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
-
-
- @classproperty
- def _allColumnsByHomeIDQuery(cls):
- """
- DAL query to load all columns by home ID.
- """
- obj = cls._objectSchema
- return Select(
- [obj.RESOURCE_ID, obj.NOTIFICATION_UID, obj.MD5,
- Len(obj.NOTIFICATION_DATA), obj.NOTIFICATION_TYPE, obj.CREATED, obj.MODIFIED],
- From=obj,
- Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID"))
- )
-
-
- @classmethod
- @inlineCallbacks
- def loadAllObjects(cls, parent):
- """
- Load all child objects and return a list of them. This must create the
- child classes and initialize them using "batched" SQL operations to keep
- this constant wrt the number of children. This is an optimization for
- Depth:1 operations on the collection.
- """
-
- results = []
-
- # Load from the main table first
- dataRows = (
- yield cls._allColumnsByHomeIDQuery.on(parent._txn,
- homeID=parent._resourceID))
-
- if dataRows:
- # Get property stores for all these child resources (if any found)
- propertyStores = (yield PropertyStore.forMultipleResources(
- parent.uid(),
- None,
- None,
- parent._txn,
- schema.NOTIFICATION.RESOURCE_ID,
- schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
- parent._resourceID,
- ))
-
- # Create the actual objects merging in properties
- for row in dataRows:
- child = cls(parent, None)
- (child._resourceID,
- child._uid,
- child._md5,
- child._size,
- child._notificationType,
- child._created,
- child._modified,) = tuple(row)
- try:
- child._notificationType = json.loads(child._notificationType)
- except ValueError:
- pass
- if isinstance(child._notificationType, unicode):
- child._notificationType = child._notificationType.encode("utf-8")
- child._loadPropertyStore(
- props=propertyStores.get(child._resourceID, None)
- )
- results.append(child)
-
- returnValue(results)
-
-
- @classproperty
- def _oneNotificationQuery(cls):
- no = cls._objectSchema
- return Select(
- [
- no.RESOURCE_ID,
- no.MD5,
- Len(no.NOTIFICATION_DATA),
- no.NOTIFICATION_TYPE,
- no.CREATED,
- no.MODIFIED
- ],
- From=no,
- Where=(no.NOTIFICATION_UID ==
- Parameter("uid")).And(no.NOTIFICATION_HOME_RESOURCE_ID ==
- Parameter("homeID")))
-
-
- @inlineCallbacks
- def initFromStore(self):
- """
- Initialise this object from the store, based on its UID and home
- resource ID. We read in and cache all the extra metadata from the DB to
- avoid having to do DB queries for those individually later.
-
- @return: L{self} if object exists in the DB, else C{None}
- """
- rows = (yield self._oneNotificationQuery.on(
- self._txn, uid=self._uid, homeID=self._home._resourceID))
- if rows:
- (self._resourceID,
- self._md5,
- self._size,
- self._notificationType,
- self._created,
- self._modified,) = tuple(rows[0])
- try:
- self._notificationType = json.loads(self._notificationType)
- except ValueError:
- pass
- if isinstance(self._notificationType, unicode):
- self._notificationType = self._notificationType.encode("utf-8")
- self._loadPropertyStore()
- returnValue(self)
- else:
- returnValue(None)
-
-
- def _loadPropertyStore(self, props=None, created=False):
- if props is None:
- props = NonePropertyStore(self._home.uid())
- self._propertyStore = props
-
-
- def properties(self):
- return self._propertyStore
-
-
- def id(self):
- """
- Retrieve the store identifier for this object.
-
- @return: store identifier.
- @rtype: C{int}
- """
- return self._resourceID
-
-
- @property
- def _txn(self):
- return self._home._txn
-
-
- def notificationCollection(self):
- return self._home
-
-
- def uid(self):
- return self._uid
-
-
- def name(self):
- return self.uid() + ".xml"
-
-
- @classproperty
- def _newNotificationQuery(cls):
- no = cls._objectSchema
- return Insert(
- {
- no.NOTIFICATION_HOME_RESOURCE_ID: Parameter("homeID"),
- no.NOTIFICATION_UID: Parameter("uid"),
- no.NOTIFICATION_TYPE: Parameter("notificationType"),
- no.NOTIFICATION_DATA: Parameter("notificationData"),
- no.MD5: Parameter("md5"),
- },
- Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED]
- )
-
-
- @classproperty
- def _updateNotificationQuery(cls):
- no = cls._objectSchema
- return Update(
- {
- no.NOTIFICATION_TYPE: Parameter("notificationType"),
- no.NOTIFICATION_DATA: Parameter("notificationData"),
- no.MD5: Parameter("md5"),
- },
- Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")).And(
- no.NOTIFICATION_UID == Parameter("uid")),
- Return=no.MODIFIED
- )
-
-
- @inlineCallbacks
- def setData(self, uid, notificationtype, notificationdata, inserting=False):
- """
- Set the object resource data and update any cached metadata.
- """
-
- notificationtext = json.dumps(notificationdata)
- self._notificationType = notificationtype
- self._md5 = hashlib.md5(notificationtext).hexdigest()
- self._size = len(notificationtext)
- if inserting:
- rows = yield self._newNotificationQuery.on(
- self._txn, homeID=self._home._resourceID, uid=uid,
- notificationType=json.dumps(self._notificationType),
- notificationData=notificationtext, md5=self._md5
- )
- self._resourceID, self._created, self._modified = rows[0]
- self._loadPropertyStore()
- else:
- rows = yield self._updateNotificationQuery.on(
- self._txn, homeID=self._home._resourceID, uid=uid,
- notificationType=json.dumps(self._notificationType),
- notificationData=notificationtext, md5=self._md5
- )
- self._modified = rows[0][0]
- self._notificationData = notificationdata
-
- _notificationDataFromID = Select(
- [_objectSchema.NOTIFICATION_DATA], From=_objectSchema,
- Where=_objectSchema.RESOURCE_ID == Parameter("resourceID"))
-
-
- @inlineCallbacks
- def notificationData(self):
- if self._notificationData is None:
- self._notificationData = (yield self._notificationDataFromID.on(self._txn, resourceID=self._resourceID))[0][0]
- try:
- self._notificationData = json.loads(self._notificationData)
- except ValueError:
- pass
- if isinstance(self._notificationData, unicode):
- self._notificationData = self._notificationData.encode("utf-8")
- returnValue(self._notificationData)
-
-
- def contentType(self):
- """
- The content type of NotificationObjects is text/xml.
- """
- return MimeType.fromString("text/xml")
-
-
- def md5(self):
- return self._md5
-
-
- def size(self):
- return self._size
-
-
- def notificationType(self):
- return self._notificationType
-
-
- def created(self):
- return datetimeMktime(parseSQLTimestamp(self._created))
-
-
- def modified(self):
- return datetimeMktime(parseSQLTimestamp(self._modified))
-
-
-
-def determineNewest(uid, homeType):
- """
- Construct a query to determine the modification time of the newest object
- in a given home.
-
- @param uid: the UID of the home to scan.
- @type uid: C{str}
-
- @param homeType: The type of home to scan; C{ECALENDARTYPE},
- C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
- @type homeType: C{int}
-
- @return: A select query that will return a single row containing a single
- column which is the maximum value.
- @rtype: L{Select}
- """
- if homeType == ENOTIFICATIONTYPE:
- return Select(
- [Max(schema.NOTIFICATION.MODIFIED)],
- From=schema.NOTIFICATION_HOME.join(
- schema.NOTIFICATION,
- on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
- schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
- Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
- )
- homeTypeName = {ECALENDARTYPE: "CALENDAR",
- EADDRESSBOOKTYPE: "ADDRESSBOOK"}[homeType]
- home = getattr(schema, homeTypeName + "_HOME")
- bind = getattr(schema, homeTypeName + "_BIND")
- child = getattr(schema, homeTypeName)
- obj = getattr(schema, homeTypeName + "_OBJECT")
- return Select(
- [Max(obj.MODIFIED)],
- From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
- child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
- obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
- Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
- )
-
-
-
-@inlineCallbacks
-def mergeHomes(sqlTxn, one, other, homeType):
- """
- Merge two homes together. This determines which of C{one} or C{other} is
- newer - that is, has been modified more recently - and pulls all the data
- from the older into the newer home. Then, it changes the UID of the older
- home to its normalized UID prefixed with "old.", and then renames the
- newer home to its normalized UID.
-
- Because the UIDs of both homes have changed, B{both one and other will be
- invalid to all other callers from the start of the invocation of this
- function}.
-
- @param sqlTxn: the transaction to use
- @type sqlTxn: A L{CommonTransaction}
-
- @param one: A calendar home.
- @type one: L{ICalendarHome}
-
- @param other: Another, different calendar home.
- @type other: L{ICalendarHome}
-
- @param homeType: The type of home to scan; L{ECALENDARTYPE} or
- L{EADDRESSBOOKTYPE}.
- @type homeType: C{int}
-
- @return: a L{Deferred} which fires with the newer of C{one} or C{other},
- into which the data from the other home has been merged, when the merge
- is complete.
- """
- from txdav.caldav.datastore.util import migrateHome as migrateCalendarHome
- from txdav.carddav.datastore.util import migrateHome as migrateABHome
- migrateHome = {EADDRESSBOOKTYPE: migrateABHome,
- ECALENDARTYPE: migrateCalendarHome,
- ENOTIFICATIONTYPE: _dontBotherWithNotifications}[homeType]
- homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
- ECALENDARTYPE: schema.CALENDAR_HOME,
- ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
- both = []
- both.append([one,
- (yield determineNewest(one.uid(), homeType).on(sqlTxn))])
- both.append([other,
- (yield determineNewest(other.uid(), homeType).on(sqlTxn))])
- both.sort(key=lambda x: x[1])
-
- older = both[0][0]
- newer = both[1][0]
- yield migrateHome(older, newer, merge=True)
- # Rename the old one to 'old.<correct-guid>'
- newNormalized = normalizeUUIDOrNot(newer.uid())
- oldNormalized = normalizeUUIDOrNot(older.uid())
- yield _renameHome(sqlTxn, homeTable, older.uid(), "old." + oldNormalized)
- # Rename the new one to '<correct-guid>'
- if newer.uid() != newNormalized:
- yield _renameHome(sqlTxn, homeTable, newer.uid(), newNormalized)
- yield returnValue(newer)
-
-
-
-def _renameHome(txn, table, oldUID, newUID):
- """
- Rename a calendar, addressbook, or notification home. Note that this
- function is only safe in transactions that have had caching disabled, and
- more specifically should only ever be used during upgrades. Running this
- in a normal transaction will have unpredictable consequences, especially
- with respect to memcache.
-
- @param txn: an SQL transaction to use for this update
- @type txn: L{twext.enterprise.ienterprise.IAsyncTransaction}
-
- @param table: the storage table of the desired home type
- @type table: L{TableSyntax}
-
- @param oldUID: the old UID, the existing home's UID
- @type oldUID: L{str}
-
- @param newUID: the new UID, to change the UID to
- @type newUID: L{str}
-
- @return: a L{Deferred} which fires when the home is renamed.
- """
- return Update({table.OWNER_UID: newUID},
- Where=table.OWNER_UID == oldUID).on(txn)
-
-
-
-def _dontBotherWithNotifications(older, newer, merge):
- """
- Notifications are more transient and can be easily worked around; don't
- bother to migrate all of them when there is a UUID case mismatch.
- """
- pass
-
-
-
-@inlineCallbacks
-def _normalizeHomeUUIDsIn(t, homeType):
- """
- Normalize the UUIDs in the given L{txdav.common.datastore.CommonStore}.
-
- This changes the case of the UUIDs in the calendar home.
-
- @param t: the transaction to normalize all the UUIDs in.
- @type t: L{CommonStoreTransaction}
-
- @param homeType: The type of home to scan, L{ECALENDARTYPE},
- L{EADDRESSBOOKTYPE}, or L{ENOTIFICATIONTYPE}.
- @type homeType: C{int}
-
- @return: a L{Deferred} which fires with C{None} when the UUID normalization
- is complete.
- """
- from txdav.caldav.datastore.util import fixOneCalendarHome
- homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
- ECALENDARTYPE: schema.CALENDAR_HOME,
- ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
- homeTypeName = homeTable.model.name.split("_")[0]
-
- allUIDs = yield Select([homeTable.OWNER_UID],
- From=homeTable,
- OrderBy=homeTable.OWNER_UID).on(t)
- total = len(allUIDs)
- allElapsed = []
- for n, [UID] in enumerate(allUIDs):
- start = time.time()
- if allElapsed:
- estimate = "%0.3d" % ((sum(allElapsed) / len(allElapsed)) *
- total - n)
- else:
- estimate = "unknown"
- log.info(
- "Scanning UID {uid} [{homeType}] "
- "({pct!0.2d}%, {estimate} seconds remaining)...",
- uid=UID, pct=(n / float(total)) * 100, estimate=estimate,
- homeType=homeTypeName
- )
- other = None
- this = yield _getHome(t, homeType, UID)
- if homeType == ECALENDARTYPE:
- fixedThisHome = yield fixOneCalendarHome(this)
- else:
- fixedThisHome = 0
- fixedOtherHome = 0
- if this is None:
- log.info(
- "{uid!r} appears to be missing, already processed", uid=UID
- )
- try:
- uuidobj = UUID(UID)
- except ValueError:
- pass
- else:
- newname = str(uuidobj).upper()
- if UID != newname:
- log.info(
- "Detected case variance: {uid} {newuid}[{homeType}]",
- uid=UID, newuid=newname, homeType=homeTypeName
- )
- other = yield _getHome(t, homeType, newname)
- if other is None:
- # No duplicate: just fix the name.
- yield _renameHome(t, homeTable, UID, newname)
- else:
- if homeType == ECALENDARTYPE:
- fixedOtherHome = yield fixOneCalendarHome(other)
- this = yield mergeHomes(t, this, other, homeType)
- # NOTE: WE MUST NOT TOUCH EITHER HOME OBJECT AFTER THIS POINT.
- # THE UIDS HAVE CHANGED AND ALL OPERATIONS WILL FAIL.
-
- end = time.time()
- elapsed = end - start
- allElapsed.append(elapsed)
- log.info(
- "Scanned UID {uid}; {elapsed} seconds elapsed,"
- " {fixes} properties fixed ({duplicate} fixes in duplicate).",
- uid=UID, elapsed=elapsed, fixes=fixedThisHome,
- duplicate=fixedOtherHome
- )
- returnValue(None)
-
-
-
-def _getHome(txn, homeType, uid):
- """
- Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
- isn't I{really} a type of home.
-
- @param txn: the transaction to retrieve the home from
- @type txn: L{CommonStoreTransaction}
-
- @param homeType: L{ENOTIFICATIONTYPE}, L{ECALENDARTYPE}, or
- L{EADDRESSBOOKTYPE}.
-
- @param uid: the UID of the home to retrieve.
- @type uid: L{str}
-
- @return: a L{Deferred} that fires with the L{CommonHome} or
- L{NotificationHome} when it has been retrieved.
- """
- if homeType == ENOTIFICATIONTYPE:
- return txn.notificationsWithUID(uid, create=False)
- else:
- return txn.homeWithUID(homeType, uid)
-
-
-
-@inlineCallbacks
-def _normalizeColumnUUIDs(txn, column):
- """
- Upper-case the UUIDs in the given SQL DAL column.
-
- @param txn: The transaction.
- @type txn: L{CommonStoreTransaction}
-
- @param column: the column, which may contain UIDs, to normalize.
- @type column: L{ColumnSyntax}
-
- @return: A L{Deferred} that will fire when the UUID normalization of the
- given column has completed.
- """
- tableModel = column.model.table
- # Get a primary key made of column syntax objects for querying and
- # comparison later.
- pkey = [ColumnSyntax(columnModel)
- for columnModel in tableModel.primaryKey]
- for row in (yield Select([column] + pkey,
- From=TableSyntax(tableModel)).on(txn)):
- before = row[0]
- pkeyparts = row[1:]
- after = normalizeUUIDOrNot(before)
- if after != before:
- where = _AndNothing
- # Build a where clause out of the primary key and the parts of the
- # primary key that were found.
- for pkeycol, pkeypart in zip(pkeyparts, pkey):
- where = where.And(pkeycol == pkeypart)
- yield Update({column: after}, Where=where).on(txn)
-
-
-
-class _AndNothing(object):
- """
- Simple placeholder for iteratively generating a 'Where' clause; the 'And'
- just returns its argument, so it can be used at the start of the loop.
- """
- @staticmethod
- def And(self):
- """
- Return the argument.
- """
- return self
-
-
-
-@inlineCallbacks
-def _needsNormalizationUpgrade(txn):
- """
- Determine whether a given store requires a UUID normalization data upgrade.
-
- @param txn: the transaction to use
- @type txn: L{CommonStoreTransaction}
-
- @return: a L{Deferred} that fires with C{True} or C{False} depending on
- whether we need the normalization upgrade or not.
- """
- for x in [schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
- schema.NOTIFICATION_HOME]:
- slct = Select([x.OWNER_UID], From=x,
- Where=x.OWNER_UID != Upper(x.OWNER_UID))
- rows = yield slct.on(txn)
- if rows:
- for [uid] in rows:
- if normalizeUUIDOrNot(uid) != uid:
- returnValue(True)
- returnValue(False)
-
-
-
-@inlineCallbacks
-def fixUUIDNormalization(store):
- """
- Fix all UUIDs in the given SQL store to be in a canonical form;
- 00000000-0000-0000-0000-000000000000 format and upper-case.
- """
- t = store.newTransaction(disableCache=True)
-
- # First, let's see if there are any calendar, addressbook, or notification
- # homes that have a de-normalized OWNER_UID. If there are none, then we can
- # early-out and avoid the tedious and potentially expensive inspection of
- # oodles of calendar data.
- if not (yield _needsNormalizationUpgrade(t)):
- log.info("No potentially denormalized UUIDs detected, "
- "skipping normalization upgrade.")
- yield t.abort()
- returnValue(None)
- try:
- yield _normalizeHomeUUIDsIn(t, ECALENDARTYPE)
- yield _normalizeHomeUUIDsIn(t, EADDRESSBOOKTYPE)
- yield _normalizeHomeUUIDsIn(t, ENOTIFICATIONTYPE)
- yield _normalizeColumnUUIDs(t, schema.RESOURCE_PROPERTY.VIEWER_UID)
- yield _normalizeColumnUUIDs(t, schema.APN_SUBSCRIPTIONS.SUBSCRIBER_GUID)
- except:
- log.failure("Unable to normalize UUIDs")
- yield t.abort()
- # There's a lot of possible problems here which are very hard to test
- # for individually; unexpected data that might cause constraint
- # violations under one of the manipulations done by
- # normalizeHomeUUIDsIn. Since this upgrade does not come along with a
- # schema version bump and may be re- attempted at any time, just raise
- # the exception and log it so that we can try again later, and the
- # service will survive for everyone _not_ affected by this somewhat
- # obscure bug.
- else:
- yield t.commit()
</del></span></pre></div>
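<h3>Example: the serialize()/deserialize() round-trip</h3>
<p>The deserialize() method renamed above is the receiving half of a serialize()/deserialize() pair used when record state is shipped between pods: values are keyed by column name, missing columns are treated as None, and unknown keys are ignored so stores running slightly different schema versions can still exchange data. The following self-contained sketch illustrates only that contract; the class and column names are hypothetical, not the store's actual API.</p>
<pre>
# Hypothetical sketch of the serialize()/deserialize() contract; not the
# store's real classes. Missing columns become None, extra keys are ignored.
class RecordSketch(object):
    _allColumns = ["RESOURCE_ID", "NOTIFICATION_UID", "MD5", "MODIFIED"]

    def __init__(self, values):
        self.values = values

    def serialize(self):
        # Column name to value mapping, suitable for sending to another pod.
        return dict(zip(self._allColumns, self.values))

    @classmethod
    def deserialize(cls, mapping):
        # Tolerate schema mismatch: absent columns map to None, and unknown
        # keys in the mapping are simply dropped.
        return cls([mapping.get(column) for column in cls._allColumns])


if __name__ == "__main__":
    remote = {"RESOURCE_ID": 42, "NOTIFICATION_UID": "abc", "EXTRA": "ignored"}
    local = RecordSketch.deserialize(remote)
    print(local.values)   # [42, 'abc', None, None]
</pre>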
<a id="CalendarServertrunktxdavcommondatastoresql_apnpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_apnpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_apn.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_apn.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_apn.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_apn.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,121 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import InvalidSubscriptionValues
+
+log = Logger()
+
+"""
+Classes and methods that relate to APN objects in the SQL store.
+"""
+
+class APNSubscriptionsRecord(SerializableRecord, fromTable(schema.APN_SUBSCRIPTIONS)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.APN_SUBSCRIPTIONS}.
+ """
+ pass
+
+
+
+class APNSubscriptionsMixin(object):
+ """
+ A mixin for L{CommonStoreTransaction} that covers the APN API.
+ """
+
+ @inlineCallbacks
+ def addAPNSubscription(
+ self, token, key, timestamp, subscriber,
+ userAgent, ipAddr
+ ):
+ if not (token and key and timestamp and subscriber):
+ raise InvalidSubscriptionValues()
+
+ # Cap these values at 255 characters
+ userAgent = userAgent[:255]
+ ipAddr = ipAddr[:255]
+
+ records = yield APNSubscriptionsRecord.querysimple(
+ self,
+ token=token, resourceKey=key
+ )
+ if not records: # Subscription does not yet exist
+ try:
+ yield APNSubscriptionsRecord.create(
+ self,
+ token=token,
+ resourceKey=key,
+ modified=timestamp,
+ subscriberGUID=subscriber,
+ userAgent=userAgent,
+ ipAddr=ipAddr
+ )
+ except Exception:
+ # Subscription may have been added by someone else, which is fine
+ pass
+
+ else: # Subscription exists, so update with new timestamp and subscriber
+ try:
+ yield records[0].update(
+ modified=timestamp,
+ subscriberGUID=subscriber,
+ userAgent=userAgent,
+ ipAddr=ipAddr,
+ )
+ except Exception:
+ # Subscription may have been added by someone else, which is fine
+ pass
+
+
+ def removeAPNSubscription(self, token, key):
+ return APNSubscriptionsRecord.deletesimple(
+ self,
+ token=token,
+ resourceKey=key
+ )
+
+
+ def purgeOldAPNSubscriptions(self, olderThan):
+ return APNSubscriptionsRecord.deletesome(
+ self,
+ APNSubscriptionsRecord.modified < olderThan,
+ )
+
+
+ def apnSubscriptionsByToken(self, token):
+ return APNSubscriptionsRecord.querysimple(
+ self,
+ token=token,
+ )
+
+
+ def apnSubscriptionsByKey(self, key):
+ return APNSubscriptionsRecord.querysimple(
+ self,
+ resourceKey=key,
+ )
+
+
+ def apnSubscriptionsBySubscriber(self, guid):
+ return APNSubscriptionsRecord.querysimple(
+ self,
+ subscriberGUID=guid,
+ )
</ins></span></pre></div>
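<h3>Example: APN subscription add-or-update semantics</h3>
<p>addAPNSubscription() above treats the (token, resource key) pair as the identity of a subscription: user agent and IP address are capped at 255 characters, a missing row is created, and an existing row simply has its timestamp and subscriber refreshed. The plain-Python model below is a sketch of those semantics only (no database, hypothetical names), not the store's implementation.</p>
<pre>
# In-memory model of the add-or-update behaviour of addAPNSubscription();
# illustrative only.
subscriptions = {}  # (token, resourceKey) maps to a subscription dict


def add_apn_subscription(token, key, timestamp, subscriber, user_agent, ip_addr):
    if not (token and key and timestamp and subscriber):
        raise ValueError("invalid subscription values")
    # Cap these values at 255 characters, as the SQL columns require.
    user_agent = user_agent[:255]
    ip_addr = ip_addr[:255]
    record = subscriptions.get((token, key))
    if record is None:
        # Subscription does not yet exist: create it.
        subscriptions[(token, key)] = {
            "modified": timestamp,
            "subscriberGUID": subscriber,
            "userAgent": user_agent,
            "ipAddr": ip_addr,
        }
    else:
        # Subscription exists: refresh timestamp and subscriber details.
        record.update(modified=timestamp, subscriberGUID=subscriber,
                      userAgent=user_agent, ipAddr=ip_addr)


def purge_old_apn_subscriptions(older_than):
    # Drop any subscription not refreshed since the cutoff.
    for key, record in list(subscriptions.items()):
        if record["modified"] < older_than:
            del subscriptions[key]


add_apn_subscription("token1", "/CalDAV/user01/", 1000, "user01", "iOS/8.1", "10.0.0.1")
add_apn_subscription("token1", "/CalDAV/user01/", 2000, "user01", "iOS/8.2", "10.0.0.2")
purge_old_apn_subscriptions(1500)
print(len(subscriptions))   # 1: the refreshed subscription survives the purge
</pre>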
<a id="CalendarServertrunktxdavcommondatastoresql_directorypyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_directorypy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_directory.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_directory.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_directory.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_directory.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,848 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import SavepointAction, Select
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import AllRetriesFailed, NotFoundError
+import datetime
+import hashlib
+from txdav.who.delegates import Delegates
+
+log = Logger()
+
+"""
+Classes and methods that relate to directory objects in the SQL store, e.g.
+delegates, groups, etc.
+"""
+
+class GroupsRecord(SerializableRecord, fromTable(schema.GROUPS)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.GROUPS}.
+ """
+
+ @classmethod
+ def groupsForMember(cls, txn, memberUID):
+
+ return GroupsRecord.query(
+ txn,
+ GroupsRecord.groupID.In(
+ GroupMembershipRecord.queryExpr(
+ GroupMembershipRecord.memberUID == memberUID.encode("utf-8"),
+ attributes=(GroupMembershipRecord.groupID,),
+ )
+ ),
+ )
+
+
+
+class GroupMembershipRecord(SerializableRecord, fromTable(schema.GROUP_MEMBERSHIP)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.GROUP_MEMBERSHIP}.
+ """
+ pass
+
+
+
+class DelegateRecord(SerializableRecord, fromTable(schema.DELEGATES)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.DELEGATES}.
+ """
+ pass
+
+
+
+class DelegateGroupsRecord(SerializableRecord, fromTable(schema.DELEGATE_GROUPS)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.DELEGATE_GROUPS}.
+ """
+
+ @classmethod
+ def allGroupDelegates(cls, txn):
+ """
+ Get the directly-delegated-to groups.
+ """
+
+ return GroupsRecord.query(
+ txn,
+ GroupsRecord.groupID.In(
+ DelegateGroupsRecord.queryExpr(
+ None,
+ attributes=(DelegateGroupsRecord.groupID,),
+ )
+ ),
+ )
+
+
+ @classmethod
+ def delegateGroups(cls, txn, delegator, readWrite):
+ """
+ Get the directly-delegated-to groups.
+ """
+
+ return GroupsRecord.query(
+ txn,
+ GroupsRecord.groupID.In(
+ DelegateGroupsRecord.queryExpr(
+ (DelegateGroupsRecord.delegator == delegator.encode("utf-8")).And(
+ DelegateGroupsRecord.readWrite == (1 if readWrite else 0)
+ ),
+ attributes=(DelegateGroupsRecord.groupID,),
+ )
+ ),
+ )
+
+
+ @classmethod
+ def indirectDelegators(cls, txn, delegate, readWrite):
+ """
+ Get delegators who have delegated to groups the delegate is a member of.
+ """
+
+ return cls.query(
+ txn,
+ cls.groupID.In(
+ GroupMembershipRecord.queryExpr(
+ GroupMembershipRecord.memberUID == delegate.encode("utf-8"),
+ attributes=(GroupMembershipRecord.groupID,),
+ )
+ ).And(cls.readWrite == (1 if readWrite else 0)),
+ )
+
+
+ @classmethod
+ def indirectDelegates(cls, txn, delegator, readWrite):
+ """
+ Get delegates who are in groups which have been delegated to.
+ """
+
+ return GroupMembershipRecord.query(
+ txn,
+ GroupMembershipRecord.groupID.In(
+ DelegateGroupsRecord.queryExpr(
+ (DelegateGroupsRecord.delegator == delegator.encode("utf-8")).And(
+ DelegateGroupsRecord.readWrite == (1 if readWrite else 0)
+ ),
+ attributes=(DelegateGroupsRecord.groupID,),
+ )
+ ),
+ )
+
+
+ @classmethod
+ @inlineCallbacks
+ def delegatorGroups(cls, txn, delegator):
+ """
+ Get delegator/group pairs for the specified delegator.
+ """
+
+ # Do a join to get what we need
+ rows = yield Select(
+ list(DelegateGroupsRecord.table) + list(GroupsRecord.table),
+ From=DelegateGroupsRecord.table.join(GroupsRecord.table, DelegateGroupsRecord.groupID == GroupsRecord.groupID),
+ Where=(DelegateGroupsRecord.delegator == delegator.encode("utf-8"))
+ ).on(txn)
+
+ results = []
+ delegatorNames = [DelegateGroupsRecord.__colmap__[column] for column in list(DelegateGroupsRecord.table)]
+ groupsNames = [GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table)]
+ split_point = len(delegatorNames)
+ for row in rows:
+ delegatorRow = row[:split_point]
+ delegatorRecord = DelegateGroupsRecord()
+ delegatorRecord._attributesFromRow(zip(delegatorNames, delegatorRow))
+ delegatorRecord.transaction = txn
+ groupsRow = row[split_point:]
+ groupsRecord = GroupsRecord()
+ groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
+ groupsRecord.transaction = txn
+ results.append((delegatorRecord, groupsRecord,))
+
+ returnValue(results)
+
+
+
+class ExternalDelegateGroupsRecord(SerializableRecord, fromTable(schema.EXTERNAL_DELEGATE_GROUPS)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.EXTERNAL_DELEGATE_GROUPS}.
+ """
+ pass
+
+
+
+class GroupsAPIMixin(object):
+ """
+ A mixin for L{CommonStoreTransaction} that covers the groups API.
+ """
+
+ @inlineCallbacks
+ def addGroup(self, groupUID, name, membershipHash):
+ """
+ @type groupUID: C{unicode}
+ @type name: C{unicode}
+ @type membershipHash: C{str}
+ """
+ record = yield self.directoryService().recordWithUID(groupUID)
+ if record is None:
+ returnValue(None)
+
+ group = yield GroupsRecord.create(
+ self,
+ name=name.encode("utf-8"),
+ groupUID=groupUID.encode("utf-8"),
+ membershipHash=membershipHash,
+ )
+
+ yield self.refreshGroup(group, record)
+ returnValue(group)
+
+
+ @inlineCallbacks
+ def updateGroup(self, groupUID, name, membershipHash, extant=True):
+ """
+ @type groupUID: C{unicode}
+ @type name: C{unicode}
+ @type membershipHash: C{str}
+ @type extant: C{boolean}
+ """
+ timestamp = datetime.datetime.utcnow()
+ group = yield self.groupByUID(groupUID, create=False)
+ if group is not None:
+ yield group.update(
+ name=name.encode("utf-8"),
+ membershipHash=membershipHash,
+ extant=(1 if extant else 0),
+ modified=timestamp,
+ )
+
+
+ @inlineCallbacks
+ def groupByUID(self, groupUID, create=True):
+ """
+ Return or create a record for the group UID.
+
+ @type groupUID: C{unicode}
+
+ @return: Deferred firing with the L{GroupsRecord} for the group, or
+ C{None} if no matching group can be found or created
+ """
+ results = yield GroupsRecord.query(
+ self,
+ GroupsRecord.groupUID == groupUID.encode("utf-8")
+ )
+ if results:
+ returnValue(results[0])
+ elif create:
+ savepoint = SavepointAction("groupByUID")
+ yield savepoint.acquire(self)
+ try:
+ group = yield self.addGroup(groupUID, u"", "")
+ if group is None:
+ # The record does not actually exist within the directory
+ yield savepoint.release(self)
+ returnValue(None)
+
+ except Exception:
+ yield savepoint.rollback(self)
+ results = yield GroupsRecord.query(
+ self,
+ GroupsRecord.groupUID == groupUID.encode("utf-8")
+ )
+ returnValue(results[0] if results else None)
+ else:
+ yield savepoint.release(self)
+ returnValue(group)
+ else:
+ returnValue(None)
+
+
+ @inlineCallbacks
+ def groupByID(self, groupID):
+ """
+ Given a group ID, return the matching group record, or raise NotFoundError
+
+ @type groupID: C{str}
+ @return: Deferred firing with the L{GroupsRecord} for the group
+ """
+ results = yield GroupsRecord.query(
+ self,
+ GroupsRecord.groupID == groupID,
+ )
+ if results:
+ returnValue(results[0])
+ else:
+ raise NotFoundError
+
+
+
+class GroupCacherAPIMixin(object):
+ """
+ A mixin for L{CommonStoreTransaction} that covers the group cacher API.
+ """
+
+ def addMemberToGroup(self, memberUID, groupID):
+ return GroupMembershipRecord.create(self, groupID=groupID, memberUID=memberUID.encode("utf-8"))
+
+
+ def removeMemberFromGroup(self, memberUID, groupID):
+ return GroupMembershipRecord.deletesimple(
+ self, groupID=groupID, memberUID=memberUID.encode("utf-8")
+ )
+
+
+ @inlineCallbacks
+ def groupMemberUIDs(self, groupID):
+ """
+ Returns the cached set of UIDs for members of the given groupID.
+ Sub-groups are not returned in the results but their members are,
+ because the group membership has already been expanded/flattened
+ before storing in the db.
+
+ @param groupID: the group ID
+ @type groupID: C{int}
+ @return: the set of member UIDs
+ @rtype: a Deferred which fires with a set() of C{str} UIDs
+ """
+
+ members = yield GroupMembershipRecord.query(self, GroupMembershipRecord.groupID == groupID)
+ returnValue(set([record.memberUID.decode("utf-8") for record in members]))
+
+
+ @inlineCallbacks
+ def refreshGroup(self, group, record):
+ """
+ @param group: the group record
+ @type group: L{GroupsRecord}
+ @param record: the directory record
+ @type record: C{iDirectoryRecord}
+
+ @return: Deferred firing with a tuple of membershipChanged C{boolean},
+ added member UIDs, and removed member UIDs
+
+ """
+
+ if record is not None:
+ memberUIDs = yield record.expandedMemberUIDs()
+ name = record.displayName
+ extant = True
+ else:
+ memberUIDs = frozenset()
+ name = group.name
+ extant = False
+
+ membershipHashContent = hashlib.md5()
+ for memberUID in sorted(memberUIDs):
+ membershipHashContent.update(str(memberUID))
+ membershipHash = membershipHashContent.hexdigest()
+
+ if group.membershipHash != membershipHash:
+ membershipChanged = True
+ log.debug(
+ "Group '{group}' changed", group=name
+ )
+ else:
+ membershipChanged = False
+
+ if membershipChanged or extant != group.extant:
+ # also updates group mod date
+ yield group.update(
+ name=name,
+ membershipHash=membershipHash,
+ extant=(1 if extant else 0),
+ )
+
+ if membershipChanged:
+ addedUIDs, removedUIDs = yield self.synchronizeMembers(group.groupID, set(memberUIDs))
+ else:
+ addedUIDs = removedUIDs = None
+
+ returnValue((membershipChanged, addedUIDs, removedUIDs,))
+
+
+ @inlineCallbacks
+ def synchronizeMembers(self, groupID, newMemberUIDs):
+ """
+ Update the group membership table in the database to match the new membership list. This
+ method will diff the existing set with the new set and apply the changes. It also calls out
+ to a groupChanged() method with the set of added and removed members so that other modules
+ that depend on groups can monitor the changes.
+
+ @param groupID: group id of group to update
+ @type groupID: L{str}
+ @param newMemberUIDs: set of new member UIDs in the group
+ @type newMemberUIDs: L{set} of L{str}
+ """
+ cachedMemberUIDs = yield self.groupMemberUIDs(groupID)
+
+ removed = cachedMemberUIDs - newMemberUIDs
+ for memberUID in removed:
+ yield self.removeMemberFromGroup(memberUID, groupID)
+
+ added = newMemberUIDs - cachedMemberUIDs
+ for memberUID in added:
+ yield self.addMemberToGroup(memberUID, groupID)
+
+ yield self.groupChanged(groupID, added, removed)
+
+ returnValue((added, removed,))
+
+
+ @inlineCallbacks
+ def groupChanged(self, groupID, addedUIDs, removedUIDs):
+ """
+ Called when membership of a group changes.
+
+ @param groupID: group id of group that changed
+ @type groupID: L{str}
+ @param addedUIDs: set of new member UIDs added to the group
+ @type addedUIDs: L{set} of L{str}
+ @param removedUIDs: set of old member UIDs removed from the group
+ @type removedUIDs: L{set} of L{str}
+ """
+ yield Delegates.groupChanged(self, groupID, addedUIDs, removedUIDs)
+
+
+ @inlineCallbacks
+ def groupMembers(self, groupID):
+ """
+ The members of the given group as recorded in the db
+ """
+ members = set()
+ memberUIDs = (yield self.groupMemberUIDs(groupID))
+ for uid in memberUIDs:
+ record = (yield self.directoryService().recordWithUID(uid))
+ if record is not None:
+ members.add(record)
+ returnValue(members)
+
+
+ @inlineCallbacks
+ def groupUIDsFor(self, uid):
+ """
+ Returns the cached set of UIDs for the groups this given uid is
+ a member of.
+
+ @param uid: the uid
+ @type uid: C{unicode}
+ @return: the set of group UIDs
+ @rtype: a Deferred which fires with a set() of C{unicode} group UIDs
+ """
+ groups = yield GroupsRecord.groupsForMember(self, uid)
+ returnValue(set([group.groupUID.decode("utf-8") for group in groups]))
+
+
+
+class DelegatesAPIMixin(object):
+ """
+ A mixin for L{CommonStoreTransaction} that covers the delegates API.
+ """
+
+ @inlineCallbacks
+ def addDelegate(self, delegator, delegate, readWrite):
+ """
+ Adds a row to the DELEGATES table. The delegate should not be a
+ group. To delegate to a group, call addDelegateGroup() instead.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param delegate: the UID of the delegate
+ @type delegate: C{unicode}
+ @param readWrite: grant read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+
+ def _addDelegate(subtxn):
+ return DelegateRecord.create(
+ subtxn,
+ delegator=delegator.encode("utf-8"),
+ delegate=delegate.encode("utf-8"),
+ readWrite=1 if readWrite else 0
+ )
+
+ try:
+ yield self.subtransaction(_addDelegate, retries=0, failureOK=True)
+ except AllRetriesFailed:
+ pass
+
+
+ @inlineCallbacks
+ def addDelegateGroup(self, delegator, delegateGroupID, readWrite,
+ isExternal=False):
+ """
+ Adds a row to the DELEGATE_GROUPS table. The delegate should be a
+ group. To delegate to a person, call addDelegate() instead.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param delegateGroupID: the GROUP_ID of the delegate group
+ @type delegateGroupID: C{int}
+ @param readWrite: grant read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+
+ def _addDelegateGroup(subtxn):
+ return DelegateGroupsRecord.create(
+ subtxn,
+ delegator=delegator.encode("utf-8"),
+ groupID=delegateGroupID,
+ readWrite=1 if readWrite else 0,
+ isExternal=1 if isExternal else 0
+ )
+
+ try:
+ yield self.subtransaction(_addDelegateGroup, retries=0, failureOK=True)
+ except AllRetriesFailed:
+ pass
+
+
+ def removeDelegate(self, delegator, delegate, readWrite):
+ """
+ Removes a row from the DELEGATES table. The delegate should not be a
+ group. To remove a delegate group, call removeDelegateGroup() instead.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param delegate: the UID of the delegate
+ @type delegate: C{unicode}
+ @param readWrite: remove read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+ return DelegateRecord.deletesimple(
+ self,
+ delegator=delegator.encode("utf-8"),
+ delegate=delegate.encode("utf-8"),
+ readWrite=(1 if readWrite else 0),
+ )
+
+
+ def removeDelegates(self, delegator, readWrite):
+ """
+ Removes all rows for this delegator/readWrite combination from the
+ DELEGATES table.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param readWrite: remove read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+ return DelegateRecord.deletesimple(
+ self,
+ delegator=delegator.encode("utf-8"),
+ readWrite=(1 if readWrite else 0)
+ )
+
+
+ def removeDelegateGroup(self, delegator, delegateGroupID, readWrite):
+ """
+ Removes a row from the DELEGATE_GROUPS table. The delegate should be a
+ group. To remove a delegate person, call removeDelegate() instead.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param delegateGroupID: the GROUP_ID of the delegate group
+ @type delegateGroupID: C{int}
+ @param readWrite: remove read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+ return DelegateGroupsRecord.deletesimple(
+ self,
+ delegator=delegator.encode("utf-8"),
+ groupID=delegateGroupID,
+ readWrite=(1 if readWrite else 0),
+ )
+
+
+ def removeDelegateGroups(self, delegator, readWrite):
+ """
+ Removes all rows for this delegator/readWrite combination from the
+ DELEGATE_GROUPS table.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param readWrite: remove read and write access if True, otherwise
+ read-only access
+ @type readWrite: C{boolean}
+ """
+ return DelegateGroupsRecord.deletesimple(
+ self,
+ delegator=delegator.encode("utf-8"),
+ readWrite=(1 if readWrite else 0),
+ )
+
+
+ @inlineCallbacks
+ def delegates(self, delegator, readWrite, expanded=False):
+ """
+ Returns the UIDs of all delegates for the given delegator. If
+ expanded is False, only the direct delegates (users and groups)
+ are returned. If expanded is True, the expanded membership is
+ returned, not including the groups themselves.
+
+ @param delegator: the UID of the delegator
+ @type delegator: C{unicode}
+ @param readWrite: the access-type to check for; read and write
+ access if True, otherwise read-only access
+ @type readWrite: C{boolean}
+ @returns: the UIDs of the delegates (for the specified access
+ type)
+ @rtype: a Deferred resulting in a set
+ """
+ delegates = set()
+ delegatorU = delegator.encode("utf-8")
+
+ # First get the direct delegates
+ results = yield DelegateRecord.query(
+ self,
+ (DelegateRecord.delegator == delegatorU).And(
+ DelegateRecord.readWrite == (1 if readWrite else 0)
+ )
+ )
+ delegates.update([record.delegate.decode("utf-8") for record in results])
+
+ if expanded:
+ # Get those who are in groups which have been delegated to
+ results = yield DelegateGroupsRecord.indirectDelegates(
+ self, delegator, readWrite
+ )
+ # Skip the delegator if they are in one of the groups
+ delegates.update([record.memberUID.decode("utf-8") for record in results if record.memberUID != delegatorU])
+
+ else:
+ # Get the directly-delegated-to groups
+ results = yield DelegateGroupsRecord.delegateGroups(
+ self, delegator, readWrite,
+ )
+ delegates.update([record.groupUID.decode("utf-8") for record in results])
+
+ returnValue(delegates)
+
+
+ @inlineCallbacks
+ def delegators(self, delegate, readWrite):
+ """
+ Returns the UIDs of all delegators which have granted access to
+ the given delegate, either directly or indirectly via groups.
+
+ @param delegate: the UID of the delegate
+ @type delegate: C{unicode}
+ @param readWrite: the access-type to check for; read and write
+ access if True, otherwise read-only access
+ @type readWrite: C{boolean}
+ @returns: the UIDs of the delegators (for the specified access
+ type)
+ @rtype: a Deferred resulting in a set
+ """
+ delegators = set()
+ delegateU = delegate.encode("utf-8")
+
+ # First get the direct delegators
+ results = yield DelegateRecord.query(
+ self,
+ (DelegateRecord.delegate == delegateU).And(
+ DelegateRecord.readWrite == (1 if readWrite else 0)
+ )
+ )
+ delegators.update([record.delegator.decode("utf-8") for record in results])
+
+ # Finally get those who have delegated to groups the delegate
+ # is a member of
+ results = yield DelegateGroupsRecord.indirectDelegators(
+ self, delegate, readWrite
+ )
+ # Skip the delegator if they are in one of the groups
+ delegators.update([record.delegator.decode("utf-8") for record in results if record.delegator != delegateU])
+
+ returnValue(delegators)
+
+
+ @inlineCallbacks
+ def delegatorsToGroup(self, delegateGroupID, readWrite):
+ """
+ Return the UIDs of those who have delegated to the given group with the
+ given access level.
+
+ @param delegateGroupID: the group ID of the delegate group
+ @type delegateGroupID: C{int}
+ @param readWrite: the access-type to check for; read and write
+ access if True, otherwise read-only access
+ @type readWrite: C{boolean}
+ @returns: the UIDs of the delegators (for the specified access
+ type)
+ @rtype: a Deferred resulting in a set
+
+ """
+ results = yield DelegateGroupsRecord.query(
+ self,
+ (DelegateGroupsRecord.groupID == delegateGroupID).And(
+ DelegateGroupsRecord.readWrite == (1 if readWrite else 0)
+ )
+ )
+ delegators = set([record.delegator.decode("utf-8") for record in results])
+ returnValue(delegators)
+
+
+ @inlineCallbacks
+ def allGroupDelegates(self):
+ """
+ Return the UIDs of all groups which have been delegated to. Useful
+ for obtaining the set of groups which need to be synchronized from
+ the directory.
+
+ @returns: the UIDs of all delegated-to groups
+ @rtype: a Deferred resulting in a set
+ """
+
+ results = yield DelegateGroupsRecord.allGroupDelegates(self)
+ delegates = set([record.groupUID.decode("utf-8") for record in results])
+
+ returnValue(delegates)
+
+
+ @inlineCallbacks
+ def externalDelegates(self):
+ """
+ Returns a dictionary mapping delegate UIDs to (read-group, write-group)
+ tuples, including only those assignments that originated from the
+ directory.
+
+ @returns: dictionary mapping delegator uid to (readDelegateUID,
+ writeDelegateUID) tuples
+ @rtype: a Deferred resulting in a dictionary
+ """
+ delegates = {}
+
+ # Get the externally managed delegates (which are all groups)
+ results = yield ExternalDelegateGroupsRecord.all(self)
+ for record in results:
+ delegates[record.delegator.encode("utf-8")] = (
+ record.groupUIDRead.encode("utf-8") if record.groupUIDRead else None,
+ record.groupUIDWrite.encode("utf-8") if record.groupUIDWrite else None
+ )
+
+ returnValue(delegates)
+
+
+ @inlineCallbacks
+ def assignExternalDelegates(
+ self, delegator, readDelegateGroupID, writeDelegateGroupID,
+ readDelegateUID, writeDelegateUID
+ ):
+ """
+ Update the external delegate group table so we can quickly identify
+ diffs next time, and update the delegate group table itself
+
+ @param delegator
+ @type delegator: C{UUID}
+ """
+
+ # Delete existing external assignments for the delegator
+ yield DelegateGroupsRecord.deletesimple(
+ self,
+ delegator=str(delegator),
+ isExternal=1,
+ )
+
+ # Remove from the external comparison table
+ yield ExternalDelegateGroupsRecord.deletesimple(
+ self,
+ delegator=str(delegator),
+ )
+
+ # Store new assignments in the external comparison table
+ if readDelegateUID or writeDelegateUID:
+ readDelegateForDB = (
+ readDelegateUID.encode("utf-8") if readDelegateUID else ""
+ )
+ writeDelegateForDB = (
+ writeDelegateUID.encode("utf-8") if writeDelegateUID else ""
+ )
+ yield ExternalDelegateGroupsRecord.create(
+ self,
+ delegator=str(delegator),
+ groupUIDRead=readDelegateForDB,
+ groupUIDWrite=writeDelegateForDB,
+ )
+
+ # Apply new assignments
+ if readDelegateGroupID is not None:
+ yield self.addDelegateGroup(
+ delegator, readDelegateGroupID, False, isExternal=True
+ )
+ if writeDelegateGroupID is not None:
+ yield self.addDelegateGroup(
+ delegator, writeDelegateGroupID, True, isExternal=True
+ )
+
+
+ def dumpIndividualDelegatesLocal(self, delegator):
+ """
+ Get the L{DelegateRecord} for all delegates associated with this delegator.
+ """
+ return DelegateRecord.querysimple(self, delegator=delegator.encode("utf-8"))
+
+
+ @inlineCallbacks
+ def dumpIndividualDelegatesExternal(self, delegator):
+ """
+ Get the L{DelegateRecord} for all delegates associated with this delegator.
+ """
+ raw_results = yield self.store().conduit.send_dump_individual_delegates(self, delegator)
+ returnValue([DelegateRecord.deserialize(row) for row in raw_results])
+
+
+ def dumpGroupDelegatesLocal(self, delegator):
+ """
+ Get the L{DelegateGroupsRecord},L{GroupsRecord} for all group delegates associated with this delegator.
+ """
+ return DelegateGroupsRecord.delegatorGroups(self, delegator)
+
+
+ @inlineCallbacks
+ def dumpGroupDelegatesExternal(self, delegator):
+ """
+ Get the L{DelegateGroupsRecord},L{GroupsRecord} for all delegates associated with this delegator.
+ """
+ raw_results = yield self.store().conduit.send_dump_group_delegates(self, delegator)
+ returnValue([(DelegateGroupsRecord.deserialize(row[0]), GroupsRecord.deserialize(row[1]),) for row in raw_results])
+
+
+ def dumpExternalDelegatesLocal(self, delegator):
+ """
+ Get the L{ExternalDelegateGroupsRecord} for all delegates associated with this delegator.
+ """
+ return ExternalDelegateGroupsRecord.querysimple(self, delegator=delegator.encode("utf-8"))
+
+
+ @inlineCallbacks
+ def dumpExternalDelegatesExternal(self, delegator):
+ """
+ Get the L{ExternalDelegateGroupsRecord} for all delegates associated with this delegator.
+ """
+ raw_results = yield self.store().conduit.send_dump_external_delegates(self, delegator)
+ returnValue([ExternalDelegateGroupsRecord.deserialize(row) for row in raw_results])
</ins></span></pre></div>
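<h3>Example: direct vs. expanded delegates</h3>
<p>delegates() above returns the direct delegates (individual UIDs plus the UIDs of delegated-to groups) when expanded is False; when expanded is True it returns the direct individual delegates plus the flattened membership of delegated-to groups, leaving out the groups themselves and the delegator. The toy in-memory model below illustrates only those semantics under assumed sample data; it is not the SQL-backed implementation.</p>
<pre>
# Toy model of the delegates() semantics; data and names are hypothetical.
direct_delegates = {"user01": {"user02", "group01"}}    # delegator maps to delegates
group_members = {"group01": {"user01", "user03", "user04"}}


def delegates(delegator, expanded=False):
    results = set()
    for delegate in direct_delegates.get(delegator, set()):
        if delegate in group_members:
            if expanded:
                # Flatten the group, skipping the delegator themselves.
                results.update(u for u in group_members[delegate] if u != delegator)
            else:
                # Keep the group UID itself when not expanding.
                results.add(delegate)
        else:
            results.add(delegate)
    return results


print(sorted(delegates("user01")))                  # ['group01', 'user02']
print(sorted(delegates("user01", expanded=True)))   # ['user02', 'user03', 'user04']
</pre>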
<a id="CalendarServertrunktxdavcommondatastoresql_externalpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_external.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_external.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_external.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -26,6 +26,8 @@
</span><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
</span><span class="cx"> CommonObjectResource
</span><ins>+from txdav.common.datastore.sql_notification import NotificationCollection, \
+ NotificationObjectRecord
</ins><span class="cx"> from txdav.common.datastore.sql_tables import _HOME_STATUS_EXTERNAL
</span><span class="cx"> from txdav.common.icommondatastore import NonExistentExternalShare, \
</span><span class="cx"> ExternalShareFailed
</span><span class="lines">@@ -40,19 +42,63 @@
</span><span class="cx"> are all stubbed out since no data for the user is actually hosted in this store.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- def __init__(self, transaction, ownerUID, resourceID):
- super(CommonHomeExternal, self).__init__(transaction, ownerUID)
- self._resourceID = resourceID
- self._status = _HOME_STATUS_EXTERNAL
</del><ins>+ @classmethod
+ def makeSyntheticExternalHome(cls, transaction, diruid, resourceID):
+ """
+ During migration we need to refer to the remote home as an external home but without having a local representation
+ of it in the store. There will be a new local store home for the migrating user that will operate on local store
+ objects. The synthetic home operates only on remote objects.
</ins><span class="cx">
</span><ins>+ @param diruid: directory UID of user
+ @type diruid: L{str}
+ @param resourceID: resource ID in the remote store
+ @type resourceID: L{int}
+ """
+ attrMap = {
+ "_resourceID": resourceID,
+ "_ownerUID": diruid,
+ "_status": _HOME_STATUS_EXTERNAL,
+ }
+ homeData = [attrMap.get(attr) for attr in cls.homeAttributes()]
+ result = cls(transaction, homeData)
+ result._childClass = result._childClass._externalClass
+ return result
</ins><span class="cx">
</span><del>- def initFromStore(self, no_cache=False):
</del><ins>+
+ def __init__(self, transaction, homeData):
+ super(CommonHomeExternal, self).__init__(transaction, homeData)
+
+
+ def initFromStore(self):
</ins><span class="cx"> """
</span><del>- Never called - this should be done by CommonHome.initFromStore only.
</del><ins>+ NoOp for an external share as there are no metadata or properties.
</ins><span class="cx"> """
</span><del>- raise AssertionError("CommonHomeExternal: not supported")
</del><ins>+ return succeed(self)
</ins><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def readMetaData(self):
+ """
+ Read the home metadata from remote home and save as attributes on this object.
+ """
+ mapping = yield self._txn.store().conduit.send_home_metadata(self)
+ self.deserialize(mapping)
+
+
+ def setStatus(self, newStatus):
+ return self._txn.store().conduit.send_home_set_status(self, newStatus)
+
+
+ def setLocalStatus(self, newStatus):
+ """
+ Set the status on the object in the local store not the remote one.
+
+ @param newStatus: the new status to set
+ @type newStatus: L{int}
+ """
+ return super(CommonHomeExternal, self).setStatus(newStatus)
+
+
</ins><span class="cx"> def external(self):
</span><span class="cx"> """
</span><span class="cx"> Is this an external home.
</span><span class="lines">@@ -78,13 +124,13 @@
</span><span class="cx">
</span><span class="cx"> @memoizedKey("name", "_children")
</span><span class="cx"> @inlineCallbacks
</span><del>- def createChildWithName(self, name, externalID=None):
</del><ins>+ def createChildWithName(self, name, bindUID=None):
</ins><span class="cx"> """
</span><span class="cx"> No real children - only external ones.
</span><span class="cx"> """
</span><del>- if externalID is None:
</del><ins>+ if bindUID is None:
</ins><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><del>- child = yield super(CommonHomeExternal, self).createChildWithName(name, externalID)
</del><ins>+ child = yield super(CommonHomeExternal, self).createChildWithName(name, bindUID)
</ins><span class="cx"> returnValue(child)
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -101,7 +147,7 @@
</span><span class="cx"> Remove an external child. Check that it is invalid or unused before calling this because if there
</span><span class="cx"> are valid references to it, removing will break things.
</span><span class="cx"> """
</span><del>- if child._externalID is None:
</del><ins>+ if child._bindUID is None:
</ins><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><span class="cx"> yield super(CommonHomeExternal, self).removeChildWithName(child.name())
</span><span class="cx">
</span><span class="lines">@@ -175,11 +221,17 @@
</span><span class="cx"> raise AssertionError("CommonHomeExternal: not supported")
</span><span class="cx">
</span><span class="cx">
</span><del>-# def ownerHomeAndChildNameForChildID(self, resourceID):
-# """
-# No children.
-# """
-# raise AssertionError("CommonHomeExternal: not supported")
</del><ins>+ @inlineCallbacks
+ def sharedToBindRecords(self):
+ results = yield self._txn.store().conduit.send_home_shared_to_records(self)
+ returnValue(dict([(
+ k,
+ (
+ self._childClass._bindRecordClass.deserialize(v[0]),
+ self._childClass._bindRecordClass.deserialize(v[1]),
+ self._childClass._metadataRecordClass.deserialize(v[2]),
+ ),
+ ) for k, v in results.items()]))
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -190,7 +242,6 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- @inlineCallbacks
</del><span class="cx"> def listObjects(cls, home):
</span><span class="cx"> """
</span><span class="cx"> Retrieve the names of the children that exist in the given home.
</span><span class="lines">@@ -198,8 +249,7 @@
</span><span class="cx"> @return: an iterable of C{str}s.
</span><span class="cx"> """
</span><span class="cx">
</span><del>- results = yield home._txn.store().conduit.send_homechild_listobjects(home)
- returnValue(results)
</del><ins>+ return home._txn.store().conduit.send_homechild_listobjects(home)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="lines">@@ -209,18 +259,18 @@
</span><span class="cx">
</span><span class="cx"> results = []
</span><span class="cx"> for mapping in raw_results:
</span><del>- child = yield cls.internalize(home, mapping)
</del><ins>+ child = yield cls.deserialize(home, mapping)
</ins><span class="cx"> results.append(child)
</span><span class="cx"> returnValue(results)
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="cx"> @inlineCallbacks
</span><del>- def objectWith(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
- mapping = yield home._txn.store().conduit.send_homechild_objectwith(home, name, resourceID, externalID, accepted)
</del><ins>+ def objectWith(cls, home, name=None, resourceID=None, bindUID=None, accepted=True):
+ mapping = yield home._txn.store().conduit.send_homechild_objectwith(home, name, resourceID, bindUID, accepted)
</ins><span class="cx">
</span><span class="cx"> if mapping:
</span><del>- child = yield cls.internalize(home, mapping)
</del><ins>+ child = yield cls.deserialize(home, mapping)
</ins><span class="cx"> returnValue(child)
</span><span class="cx"> else:
</span><span class="cx"> returnValue(None)
</span><span class="lines">@@ -310,15 +360,14 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def syncToken(self):
</del><ins>+ def syncTokenRevision(self):
</ins><span class="cx"> if self._syncTokenRevision is None:
</span><span class="cx"> try:
</span><del>- token = yield self._txn.store().conduit.send_homechild_synctoken(self)
- self._syncTokenRevision = self.revisionFromToken(token)
</del><ins>+ revision = yield self._txn.store().conduit.send_homechild_synctokenrevision(self)
</ins><span class="cx"> except NonExistentExternalShare:
</span><span class="cx"> yield self.fixNonExistentExternalShare()
</span><span class="cx"> raise ExternalShareFailed("External share does not exist")
</span><del>- returnValue(("%s_%s" % (self._externalID, self._syncTokenRevision,)))
</del><ins>+ returnValue(revision)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -343,7 +392,17 @@
</span><span class="cx"> returnValue(results)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ @inlineCallbacks
+ def sharingBindRecords(self):
+ results = yield self._txn.store().conduit.send_homechild_sharing_records(self)
+ returnValue(dict([(k, self._bindRecordClass.deserialize(v),) for k, v in results.items()]))
</ins><span class="cx">
</span><ins>+
+ def migrateBindRecords(self, bindUID):
+ return self._txn.store().conduit.send_homechild_migrate_sharing_records(self, bindUID)
+
+
+
</ins><span class="cx"> class CommonObjectResourceExternal(CommonObjectResource):
</span><span class="cx"> """
</span><span class="cx"> A CommonObjectResource for a resource not hosted on this system, but on another pod. This will forward
</span><span class="lines">@@ -358,7 +417,7 @@
</span><span class="cx"> results = []
</span><span class="cx"> if mapping_list:
</span><span class="cx"> for mapping in mapping_list:
</span><del>- child = yield cls.internalize(parent, mapping)
</del><ins>+ child = yield cls.deserialize(parent, mapping)
</ins><span class="cx"> results.append(child)
</span><span class="cx"> returnValue(results)
</span><span class="cx">
</span><span class="lines">@@ -371,23 +430,19 @@
</span><span class="cx"> results = []
</span><span class="cx"> if mapping_list:
</span><span class="cx"> for mapping in mapping_list:
</span><del>- child = yield cls.internalize(parent, mapping)
</del><ins>+ child = yield cls.deserialize(parent, mapping)
</ins><span class="cx"> results.append(child)
</span><span class="cx"> returnValue(results)
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- @inlineCallbacks
</del><span class="cx"> def listObjects(cls, parent):
</span><del>- results = yield parent._txn.store().conduit.send_objectresource_listobjects(parent)
- returnValue(results)
</del><ins>+ return parent._txn.store().conduit.send_objectresource_listobjects(parent)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- @inlineCallbacks
</del><span class="cx"> def countObjects(cls, parent):
</span><del>- result = yield parent._txn.store().conduit.send_objectresource_countobjects(parent)
- returnValue(result)
</del><ins>+ return parent._txn.store().conduit.send_objectresource_countobjects(parent)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="lines">@@ -396,24 +451,20 @@
</span><span class="cx"> mapping = yield parent._txn.store().conduit.send_objectresource_objectwith(parent, name, uid, resourceID)
</span><span class="cx">
</span><span class="cx"> if mapping:
</span><del>- child = yield cls.internalize(parent, mapping)
</del><ins>+ child = yield cls.deserialize(parent, mapping)
</ins><span class="cx"> returnValue(child)
</span><span class="cx"> else:
</span><span class="cx"> returnValue(None)
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- @inlineCallbacks
</del><span class="cx"> def resourceNameForUID(cls, parent, uid):
</span><del>- result = yield parent._txn.store().conduit.send_objectresource_resourcenameforuid(parent, uid)
- returnValue(result)
</del><ins>+ return parent._txn.store().conduit.send_objectresource_resourcenameforuid(parent, uid)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><del>- @inlineCallbacks
</del><span class="cx"> def resourceUIDForName(cls, parent, name):
</span><del>- result = yield parent._txn.store().conduit.send_objectresource_resourceuidforname(parent, name)
- returnValue(result)
</del><ins>+ return parent._txn.store().conduit.send_objectresource_resourceuidforname(parent, name)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @classmethod
</span><span class="lines">@@ -422,7 +473,7 @@
</span><span class="cx"> mapping = yield parent._txn.store().conduit.send_objectresource_create(parent, name, str(component), options=options)
</span><span class="cx">
</span><span class="cx"> if mapping:
</span><del>- child = yield cls.internalize(parent, mapping)
</del><ins>+ child = yield cls.deserialize(parent, mapping)
</ins><span class="cx"> returnValue(child)
</span><span class="cx"> else:
</span><span class="cx"> returnValue(None)
</span><span class="lines">@@ -444,6 +495,46 @@
</span><span class="cx"> returnValue(self._cachedComponent)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def remove(self):
+ return self._txn.store().conduit.send_objectresource_remove(self)
+
+
+
+class NotificationCollectionExternal(NotificationCollection):
+ """
+ A NotificationCollection for a resource not hosted on this system, but on another pod. This will forward
+ specific apis to the other pod using cross-pod requests.
+ """
+
+ @classmethod
+ def notificationsWithUID(cls, txn, uid, create=False):
+ return super(NotificationCollectionExternal, cls).notificationsWithUID(txn, uid, status=_HOME_STATUS_EXTERNAL, create=create)
+
+
+ def initFromStore(self):
+ """
+ NoOp for an external share as there are no properties.
+ """
+ return succeed(self)
+
+
</ins><span class="cx"> @inlineCallbacks
</span><del>- def remove(self):
- yield self._txn.store().conduit.send_objectresource_remove(self)
</del><ins>+ def notificationObjectRecords(self):
+ results = yield self._txn.store().conduit.send_notification_all_records(self)
+ returnValue(map(NotificationObjectRecord.deserialize, results))
+
+
+ def setStatus(self, newStatus):
+ return self._txn.store().conduit.send_notification_set_status(self, newStatus)
+
+
+ def setLocalStatus(self, newStatus):
+ """
+ Set the status on the object in the local store not the remote one.
+
+ @param newStatus: the new status to set
+ @type newStatus: L{int}
+ """
+ return super(NotificationCollectionExternal, self).setStatus(newStatus)
+
+NotificationCollection._externalClass = NotificationCollectionExternal
</ins></span></pre></div>
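<p>The reworked CommonHomeExternal above can now be built synthetically during pod-to-pod migration: makeSyntheticExternalHome() constructs an external-status home object that proxies the remote pod without needing a local row, and methods such as readMetaData() and sharedToBindRecords() round-trip through the conduit. A hedged sketch of how a migration step might use it follows; <code>txn</code>, <code>homeClass</code>, <code>diruid</code> and <code>remoteResourceID</code> are illustrative inputs, not part of this changeset.</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def inspectRemoteHome(txn, homeClass, diruid, remoteResourceID):
    # Build a home object that refers to the remote pod's data without a local row.
    remoteHome = homeClass.makeSyntheticExternalHome(txn, diruid, remoteResourceID)
    # Fetch the remote home metadata over the conduit and apply it to this object.
    yield remoteHome.readMetaData()
    # Bind and metadata records for collections shared out of the remote home,
    # deserialized locally into the usual record classes.
    shared = yield remoteHome.sharedToBindRecords()
    returnValue(shared)
</pre>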
<a id="CalendarServertrunktxdavcommondatastoresql_imippyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_imippy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_imip.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_imip.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_imip.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_imip.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,102 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import utcNowSQL
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import InvalidIMIPTokenValues
+from uuid import uuid4
+
+log = Logger()
+
+"""
+Classes and methods that relate to iMIP objects in the SQL store.
+"""
+
+class iMIPTokensRecord(SerializableRecord, fromTable(schema.IMIP_TOKENS)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.IMIP_TOKENS}.
+ """
+ pass
+
+
+
+class imipAPIMixin(object):
+ """
+ A mixin for L{CommonStoreTransaction} that covers the iMIP API.
+ """
+
+ # Create IMIP token
+ @inlineCallbacks
+ def imipCreateToken(self, organizer, attendee, icaluid, token=None):
+ if not (organizer and attendee and icaluid):
+ raise InvalidIMIPTokenValues()
+
+ if token is None:
+ token = str(uuid4())
+
+ try:
+ record = yield iMIPTokensRecord.create(
+ self,
+ token=token,
+ organizer=organizer,
+ attendee=attendee,
+ icaluid=icaluid
+ )
+ except Exception:
+ # TODO: is it okay if someone else created the same row just now?
+ record = yield self.imipGetToken(organizer, attendee, icaluid)
+ returnValue(record)
+
+
+ # Lookup IMIP organizer+attendee+icaluid for token
+ def imipLookupByToken(self, token):
+ return iMIPTokensRecord.querysimple(self, token=token)
+
+
+ # Lookup IMIP token for organizer+attendee+icaluid
+ @inlineCallbacks
+ def imipGetToken(self, organizer, attendee, icaluid):
+ records = yield iMIPTokensRecord.querysimple(
+ self,
+ organizer=organizer,
+ attendee=attendee,
+ icaluid=icaluid,
+ )
+ if records:
+ # update the timestamp
+ record = records[0]
+ yield record.update(accessed=utcNowSQL)
+ else:
+ record = None
+ returnValue(record)
+
+
+ # Remove IMIP token
+ def imipRemoveToken(self, token):
+ return iMIPTokensRecord.deletesimple(self, token=token)
+
+
+ # Purge old IMIP tokens
+ def purgeOldIMIPTokens(self, olderThan):
+ """
+ @type olderThan: datetime
+ """
+ return iMIPTokensRecord.delete(self, iMIPTokensRecord.accessed < olderThan)
</ins></span></pre></div>
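<p>sql_imip.py collects the iMIP token API onto a transaction mixin: tokens are keyed by (organizer, attendee, icaluid), minted from a fresh uuid4 value, and imipGetToken() refreshes the accessed timestamp on reuse. Below is a small usage sketch under stated assumptions: a transaction that mixes in imipAPIMixin, and record attribute names mirroring the IMIP_TOKENS columns (so <code>record.token</code> is an assumption here).</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def tokenForInvite(txn, organizer, attendee, icaluid):
    # Reuse an existing token when present (this also bumps its accessed time);
    # otherwise create one. imipCreateToken falls back to a lookup on conflict.
    record = yield txn.imipGetToken(organizer, attendee, icaluid)
    if record is None:
        record = yield txn.imipCreateToken(organizer, attendee, icaluid)
    returnValue(record.token)
</pre>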
<a id="CalendarServertrunktxdavcommondatastoresql_notificationpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_notificationpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_notification.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_notification.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_notification.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_notification.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,884 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import Select, Parameter, Insert, \
+ SavepointAction, Delete, Max, Len, Update
+from twext.enterprise.util import parseSQLTimestamp
+from twext.internet.decorate import memoizedKey
+from twext.python.clsprop import classproperty
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.util import FancyEqMixin
+from twistedcaldav.dateops import datetimeMktime
+from txdav.base.propertystore.sql import PropertyStore
+from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_NORMAL, \
+ _HOME_STATUS_EXTERNAL, _HOME_STATUS_DISABLED, _HOME_STATUS_MIGRATING
+from txdav.common.datastore.sql_util import _SharedSyncLogic
+from txdav.common.icommondatastore import RecordNotAllowedError
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+from txdav.common.inotifications import INotificationCollection, \
+ INotificationObject
+from txdav.idav import ChangeCategory
+from txweb2.dav.noneprops import NonePropertyStore
+from txweb2.http_headers import MimeType
+from zope.interface.declarations import implements
+import hashlib
+import json
+
+"""
+Classes and methods that relate to the Notification collection in the SQL store.
+"""
+class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
+ log = Logger()
+
+ implements(INotificationCollection)
+
+ compareAttributes = (
+ "_ownerUID",
+ "_resourceID",
+ )
+
+ _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
+ _homeSchema = schema.NOTIFICATION_HOME
+
+ _externalClass = None
+
+
+ @classmethod
+ def makeClass(cls, transaction, homeData):
+ """
+ Build the actual home class taking into account the possibility that we might need to
+ switch in the external version of the class.
+
+ @param transaction: transaction
+ @type transaction: L{CommonStoreTransaction}
+ @param homeData: home table column data
+ @type homeData: C{list}
+ """
+
+ status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
+ if status == _HOME_STATUS_EXTERNAL:
+ home = cls._externalClass(transaction, homeData)
+ else:
+ home = cls(transaction, homeData)
+ return home.initFromStore()
+
+
+ @classmethod
+ def homeColumns(cls):
+ """
+ Return a list of column names to retrieve when doing an ownerUID->home lookup.
+ """
+
+ # Common behavior is to have created and modified
+
+ return (
+ cls._homeSchema.RESOURCE_ID,
+ cls._homeSchema.OWNER_UID,
+ cls._homeSchema.STATUS,
+ )
+
+
+ @classmethod
+ def homeAttributes(cls):
+ """
+ Return a list of attribute names to map L{homeColumns} to.
+ """
+
+ # Common behavior is to have created and modified
+
+ return (
+ "_resourceID",
+ "_ownerUID",
+ "_status",
+ )
+
+
+ def __init__(self, txn, homeData):
+
+ self._txn = txn
+
+ for attr, value in zip(self.homeAttributes(), homeData):
+ setattr(self, attr, value)
+
+ self._txn = txn
+ self._dataVersion = None
+ self._notifications = {}
+ self._notificationNames = None
+ self._syncTokenRevision = None
+
+ # Make sure we have push notifications setup to push on this collection
+ # as well as the home it is in
+ self._notifiers = dict([(factory_name, factory.newNotifier(self),) for factory_name, factory in txn._notifierFactories.items()])
+
+
+ @inlineCallbacks
+ def initFromStore(self):
+ """
+ Initialize this object from the store.
+ """
+
+ yield self._loadPropertyStore()
+ returnValue(self)
+
+
+ @property
+ def _home(self):
+ """
+ L{NotificationCollection} serves as its own C{_home} for the purposes of
+ working with L{_SharedSyncLogic}.
+ """
+ return self
+
+
+ @classmethod
+ def notificationsWithUID(cls, txn, uid, status=None, create=False):
+ return cls.notificationsWith(txn, None, uid, status=status, create=create)
+
+
+ @classmethod
+ def notificationsWithResourceID(cls, txn, rid):
+ return cls.notificationsWith(txn, rid, None)
+
+
+ @classmethod
+ @inlineCallbacks
+ def notificationsWith(cls, txn, rid, uid, status=None, create=False):
+ """
+ @param uid: assumed to be utf-8 encoded bytes
+ """
+ if rid is not None:
+ query = cls._homeSchema.RESOURCE_ID == rid
+ elif uid is not None:
+ query = cls._homeSchema.OWNER_UID == uid
+ if status is not None:
+ query = query.And(cls._homeSchema.STATUS == status)
+ else:
+ statusSet = (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL,)
+ if txn._allowDisabled:
+ statusSet += (_HOME_STATUS_DISABLED,)
+ query = query.And(cls._homeSchema.STATUS.In(statusSet))
+ else:
+ raise AssertionError("One of rid or uid must be set")
+
+ results = yield Select(
+ cls.homeColumns(),
+ From=cls._homeSchema,
+ Where=query,
+ ).on(txn)
+
+ if len(results) > 1:
+ # Pick the best one in order: normal, disabled and external
+ byStatus = dict([(result[cls.homeColumns().index(cls._homeSchema.STATUS)], result) for result in results])
+ result = byStatus.get(_HOME_STATUS_NORMAL)
+ if result is None:
+ result = byStatus.get(_HOME_STATUS_DISABLED)
+ if result is None:
+ result = byStatus.get(_HOME_STATUS_EXTERNAL)
+ elif results:
+ result = results[0]
+ else:
+ result = None
+
+ if result:
+ # Return object that already exists in the store
+ homeObject = yield cls.makeClass(txn, result)
+ returnValue(homeObject)
+ else:
+ # Can only create when uid is specified
+ if not create or uid is None:
+ returnValue(None)
+
+ # Determine if the user is local or external
+ record = yield txn.directoryService().recordWithUID(uid.decode("utf-8"))
+ if record is None:
+ raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {}".format(uid))
+
+ if status is None:
+ createStatus = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
+ elif status == _HOME_STATUS_MIGRATING:
+ if record.thisServer():
+ raise RecordNotAllowedError("Cannot migrate a user data for a user already hosted on this server")
+ createStatus = status
+ elif status in (_HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL,):
+ createStatus = status
+ else:
+ raise RecordNotAllowedError("Cannot create home with status {}: {}".format(status, uid))
+
+ # Use savepoint so we can do a partial rollback if there is a race
+ # condition where this row has already been inserted
+ savepoint = SavepointAction("notificationsWithUID")
+ yield savepoint.acquire(txn)
+
+ try:
+ resourceid = (yield Insert(
+ {
+ cls._homeSchema.OWNER_UID: uid,
+ cls._homeSchema.STATUS: createStatus,
+ },
+ Return=cls._homeSchema.RESOURCE_ID
+ ).on(txn))[0][0]
+ except Exception:
+ # FIXME: Really want to trap the pg.DatabaseError but in a non-
+ # DB specific manner
+ yield savepoint.rollback(txn)
+
+ # Retry the query - row may exist now, if not re-raise
+ results = yield Select(
+ cls.homeColumns(),
+ From=cls._homeSchema,
+ Where=query,
+ ).on(txn)
+ if results:
+ homeObject = yield cls.makeClass(txn, results[0])
+ returnValue(homeObject)
+ else:
+ raise
+ else:
+ yield savepoint.release(txn)
+
+ # Note that we must not cache the owner_uid->resource_id
+ # mapping in the query cacher when creating as we don't want that to appear
+ # until AFTER the commit
+ results = yield Select(
+ cls.homeColumns(),
+ From=cls._homeSchema,
+ Where=cls._homeSchema.RESOURCE_ID == resourceid,
+ ).on(txn)
+ homeObject = yield cls.makeClass(txn, results[0])
+ if homeObject.normal():
+ yield homeObject._initSyncToken()
+ yield homeObject.notifyChanged()
+ returnValue(homeObject)
+
+
+ @inlineCallbacks
+ def _loadPropertyStore(self):
+ self._propertyStore = yield PropertyStore.load(
+ self._ownerUID,
+ self._ownerUID,
+ None,
+ self._txn,
+ self._resourceID,
+ notifyCallback=self.notifyChanged
+ )
+
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
+
+
+ def id(self):
+ """
+ Retrieve the store identifier for this collection.
+
+ @return: store identifier.
+ @rtype: C{int}
+ """
+ return self._resourceID
+
+
+ @classproperty
+ def _dataVersionQuery(cls):
+ nh = cls._homeSchema
+ return Select(
+ [nh.DATAVERSION], From=nh,
+ Where=nh.RESOURCE_ID == Parameter("resourceID")
+ )
+
+
+ @inlineCallbacks
+ def dataVersion(self):
+ if self._dataVersion is None:
+ self._dataVersion = (yield self._dataVersionQuery.on(
+ self._txn, resourceID=self._resourceID))[0][0]
+ returnValue(self._dataVersion)
+
+
+ def name(self):
+ return "notification"
+
+
+ def uid(self):
+ return self._ownerUID
+
+
+ def status(self):
+ return self._status
+
+
+ @inlineCallbacks
+ def setStatus(self, newStatus):
+ """
+ Set the status of this notification home.
+ """
+ # Only if different
+ if self._status != newStatus:
+ yield Update(
+ {self._homeSchema.STATUS: newStatus},
+ Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
+ ).on(self._txn)
+ self._status = newStatus
+
+
+ def normal(self):
+ """
+ Is this a normal (internal) home.
+
+ @return: a L{bool}.
+ """
+ return self._status == _HOME_STATUS_NORMAL
+
+
+ def external(self):
+ """
+ Is this an external home.
+
+ @return: a L{bool}.
+ """
+ return self._status == _HOME_STATUS_EXTERNAL
+
+
+ def owned(self):
+ return True
+
+
+ def ownerHome(self):
+ return self._home
+
+
+ def viewerHome(self):
+ return self._home
+
+
+ def notificationObjectRecords(self):
+ return NotificationObjectRecord.querysimple(self._txn, notificationHomeResourceID=self.id())
+
+
+ @inlineCallbacks
+ def notificationObjects(self):
+ results = (yield NotificationObject.loadAllObjects(self))
+ for result in results:
+ self._notifications[result.uid()] = result
+ self._notificationNames = sorted([result.name() for result in results])
+ returnValue(results)
+
+ _notificationUIDsForHomeQuery = Select(
+ [schema.NOTIFICATION.NOTIFICATION_UID], From=schema.NOTIFICATION,
+ Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID ==
+ Parameter("resourceID"))
+
+
+ @inlineCallbacks
+ def listNotificationObjects(self):
+ if self._notificationNames is None:
+ rows = yield self._notificationUIDsForHomeQuery.on(
+ self._txn, resourceID=self._resourceID)
+ self._notificationNames = sorted([row[0] for row in rows])
+ returnValue(self._notificationNames)
+
+
+ # used by _SharedSyncLogic.resourceNamesSinceRevision()
+ def listObjectResources(self):
+ return self.listNotificationObjects()
+
+
+ def _nameToUID(self, name):
+ """
+ Based on the file-backed implementation, the 'name' is just uid +
+ ".xml".
+ """
+ return name.rsplit(".", 1)[0]
+
+
+ def notificationObjectWithName(self, name):
+ return self.notificationObjectWithUID(self._nameToUID(name))
+
+
+ @memoizedKey("uid", "_notifications")
+ @inlineCallbacks
+ def notificationObjectWithUID(self, uid):
+ """
+ Create an empty notification object first then have it initialize itself
+ from the store.
+ """
+ no = NotificationObject(self, uid)
+ no = (yield no.initFromStore())
+ returnValue(no)
+
+
+ @inlineCallbacks
+ def writeNotificationObject(self, uid, notificationtype, notificationdata):
+
+ inserting = False
+ notificationObject = yield self.notificationObjectWithUID(uid)
+ if notificationObject is None:
+ notificationObject = NotificationObject(self, uid)
+ inserting = True
+ yield notificationObject.setData(uid, notificationtype, notificationdata, inserting=inserting)
+ if inserting:
+ yield self._insertRevision("%s.xml" % (uid,))
+ if self._notificationNames is not None:
+ self._notificationNames.append(notificationObject.uid())
+ else:
+ yield self._updateRevision("%s.xml" % (uid,))
+ yield self.notifyChanged()
+ returnValue(notificationObject)
+
+
+ def removeNotificationObjectWithName(self, name):
+ if self._notificationNames is not None:
+ self._notificationNames.remove(self._nameToUID(name))
+ return self.removeNotificationObjectWithUID(self._nameToUID(name))
+
+ _removeByUIDQuery = Delete(
+ From=schema.NOTIFICATION,
+ Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter("uid")).And(
+ schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID
+ == Parameter("resourceID")))
+
+
+ @inlineCallbacks
+ def removeNotificationObjectWithUID(self, uid):
+ yield self._removeByUIDQuery.on(
+ self._txn, uid=uid, resourceID=self._resourceID)
+ self._notifications.pop(uid, None)
+ yield self._deleteRevision("%s.xml" % (uid,))
+ yield self.notifyChanged()
+
+ _initSyncTokenQuery = Insert(
+ {
+ _revisionsSchema.HOME_RESOURCE_ID : Parameter("resourceID"),
+ _revisionsSchema.RESOURCE_NAME : None,
+ _revisionsSchema.REVISION : schema.REVISION_SEQ,
+ _revisionsSchema.DELETED : False
+ }, Return=_revisionsSchema.REVISION
+ )
+
+
+ @inlineCallbacks
+ def _initSyncToken(self):
+ self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
+ self._txn, resourceID=self._resourceID))[0][0]
+
+ _syncTokenQuery = Select(
+ [Max(_revisionsSchema.REVISION)], From=_revisionsSchema,
+ Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter("resourceID")
+ )
+
+
+ @inlineCallbacks
+ def syncToken(self):
+ if self._syncTokenRevision is None:
+ self._syncTokenRevision = yield self.syncTokenRevision()
+ returnValue("%s_%s" % (self._resourceID, self._syncTokenRevision))
+
+
+ @inlineCallbacks
+ def syncTokenRevision(self):
+ revision = (yield self._syncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
+ if revision is None:
+ revision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
+ returnValue(revision)
+
+
+ def properties(self):
+ return self._propertyStore
+
+
+ def addNotifier(self, factory_name, notifier):
+ if self._notifiers is None:
+ self._notifiers = {}
+ self._notifiers[factory_name] = notifier
+
+
+ def getNotifier(self, factory_name):
+ return self._notifiers.get(factory_name)
+
+
+ def notifierID(self):
+ return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, "%s/notification" % (self.ownerHome().uid(),),)
+
+
+ def parentNotifierID(self):
+ return (self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix, "%s" % (self.ownerHome().uid(),),)
+
+
+ @inlineCallbacks
+ def notifyChanged(self, category=ChangeCategory.default):
+ """
+ Send notifications, change sync token and bump last modified because
+ the resource has changed. We ensure we only do this once per object
+ per transaction.
+ """
+ if self._txn.isNotifiedAlready(self):
+ returnValue(None)
+ self._txn.notificationAddedForObject(self)
+
+ # Send notifications
+ if self._notifiers:
+ # cache notifiers run in post commit
+ notifier = self._notifiers.get("cache", None)
+ if notifier:
+ self._txn.postCommit(notifier.notify)
+ # push notifiers add their work items immediately
+ notifier = self._notifiers.get("push", None)
+ if notifier:
+ yield notifier.notify(self._txn, priority=category.value)
+
+ returnValue(None)
+
+
+ @classproperty
+ def _completelyNewRevisionQuery(cls):
+ rev = cls._revisionsSchema
+ return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
+ # rev.RESOURCE_ID: Parameter("resourceID"),
+ rev.RESOURCE_NAME: Parameter("name"),
+ rev.REVISION: schema.REVISION_SEQ,
+ rev.DELETED: False},
+ Return=rev.REVISION)
+
+
+ def _maybeNotify(self):
+ """
+ Emit a push notification after C{_changeRevision}.
+ """
+ return self.notifyChanged()
+
+
+ @inlineCallbacks
+ def remove(self):
+ """
+ Remove DB rows corresponding to this notification home.
+ """
+ # Delete NOTIFICATION rows
+ no = schema.NOTIFICATION
+ kwds = {"ResourceID": self._resourceID}
+ yield Delete(
+ From=no,
+ Where=(
+ no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")
+ ),
+ ).on(self._txn, **kwds)
+
+ # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
+ nh = schema.NOTIFICATION_HOME
+ yield Delete(
+ From=nh,
+ Where=(
+ nh.RESOURCE_ID == Parameter("ResourceID")
+ ),
+ ).on(self._txn, **kwds)
+
+
+
+class NotificationObjectRecord(SerializableRecord, fromTable(schema.NOTIFICATION)):
+ """
+ @DynamicAttrs
+ L{Record} for L{schema.NOTIFICATION}.
+ """
+ pass
+
+
+
+class NotificationObject(FancyEqMixin, object):
+ """
+ This used to store XML data and an XML element for the type. But we are now switching it
+ to use JSON internally. The app layer will convert that to XML and fill in the "blanks" as
+ needed for the app.
+ """
+ log = Logger()
+
+ implements(INotificationObject)
+
+ compareAttributes = (
+ "_resourceID",
+ "_home",
+ )
+
+ _objectSchema = schema.NOTIFICATION
+
+ def __init__(self, home, uid):
+ self._home = home
+ self._resourceID = None
+ self._uid = uid
+ self._md5 = None
+ self._size = None
+ self._created = None
+ self._modified = None
+ self._notificationType = None
+ self._notificationData = None
+
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
+
+
+ @classproperty
+ def _allColumnsByHomeIDQuery(cls):
+ """
+ DAL query to load all columns by home ID.
+ """
+ obj = cls._objectSchema
+ return Select(
+ [obj.RESOURCE_ID, obj.NOTIFICATION_UID, obj.MD5,
+ Len(obj.NOTIFICATION_DATA), obj.NOTIFICATION_TYPE, obj.CREATED, obj.MODIFIED],
+ From=obj,
+ Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID"))
+ )
+
+
+ @classmethod
+ @inlineCallbacks
+ def loadAllObjects(cls, parent):
+ """
+ Load all child objects and return a list of them. This must create the
+ child classes and initialize them using "batched" SQL operations to keep
+ this constant wrt the number of children. This is an optimization for
+ Depth:1 operations on the collection.
+ """
+
+ results = []
+
+ # Load from the main table first
+ dataRows = (
+ yield cls._allColumnsByHomeIDQuery.on(parent._txn,
+ homeID=parent._resourceID))
+
+ if dataRows:
+ # Get property stores for all these child resources (if any found)
+ propertyStores = (yield PropertyStore.forMultipleResources(
+ parent.uid(),
+ None,
+ None,
+ parent._txn,
+ schema.NOTIFICATION.RESOURCE_ID,
+ schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
+ parent._resourceID,
+ ))
+
+ # Create the actual objects merging in properties
+ for row in dataRows:
+ child = cls(parent, None)
+ (child._resourceID,
+ child._uid,
+ child._md5,
+ child._size,
+ child._notificationType,
+ child._created,
+ child._modified,) = tuple(row)
+ try:
+ child._notificationType = json.loads(child._notificationType)
+ except ValueError:
+ pass
+ if isinstance(child._notificationType, unicode):
+ child._notificationType = child._notificationType.encode("utf-8")
+ child._loadPropertyStore(
+ props=propertyStores.get(child._resourceID, None)
+ )
+ results.append(child)
+
+ returnValue(results)
+
+
+ @classproperty
+ def _oneNotificationQuery(cls):
+ no = cls._objectSchema
+ return Select(
+ [
+ no.RESOURCE_ID,
+ no.MD5,
+ Len(no.NOTIFICATION_DATA),
+ no.NOTIFICATION_TYPE,
+ no.CREATED,
+ no.MODIFIED
+ ],
+ From=no,
+ Where=(no.NOTIFICATION_UID ==
+ Parameter("uid")).And(no.NOTIFICATION_HOME_RESOURCE_ID ==
+ Parameter("homeID")))
+
+
+ @inlineCallbacks
+ def initFromStore(self):
+ """
+ Initialise this object from the store, based on its UID and home
+ resource ID. We read in and cache all the extra metadata from the DB to
+ avoid having to do DB queries for those individually later.
+
+ @return: L{self} if object exists in the DB, else C{None}
+ """
+ rows = (yield self._oneNotificationQuery.on(
+ self._txn, uid=self._uid, homeID=self._home._resourceID))
+ if rows:
+ (self._resourceID,
+ self._md5,
+ self._size,
+ self._notificationType,
+ self._created,
+ self._modified,) = tuple(rows[0])
+ try:
+ self._notificationType = json.loads(self._notificationType)
+ except ValueError:
+ pass
+ if isinstance(self._notificationType, unicode):
+ self._notificationType = self._notificationType.encode("utf-8")
+ self._loadPropertyStore()
+ returnValue(self)
+ else:
+ returnValue(None)
+
+
+ def _loadPropertyStore(self, props=None, created=False):
+ if props is None:
+ props = NonePropertyStore(self._home.uid())
+ self._propertyStore = props
+
+
+ def properties(self):
+ return self._propertyStore
+
+
+ def id(self):
+ """
+ Retrieve the store identifier for this object.
+
+ @return: store identifier.
+ @rtype: C{int}
+ """
+ return self._resourceID
+
+
+ @property
+ def _txn(self):
+ return self._home._txn
+
+
+ def notificationCollection(self):
+ return self._home
+
+
+ def uid(self):
+ return self._uid
+
+
+ def name(self):
+ return self.uid() + ".xml"
+
+
+ @classproperty
+ def _newNotificationQuery(cls):
+ no = cls._objectSchema
+ return Insert(
+ {
+ no.NOTIFICATION_HOME_RESOURCE_ID: Parameter("homeID"),
+ no.NOTIFICATION_UID: Parameter("uid"),
+ no.NOTIFICATION_TYPE: Parameter("notificationType"),
+ no.NOTIFICATION_DATA: Parameter("notificationData"),
+ no.MD5: Parameter("md5"),
+ },
+ Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED]
+ )
+
+
+ @classproperty
+ def _updateNotificationQuery(cls):
+ no = cls._objectSchema
+ return Update(
+ {
+ no.NOTIFICATION_TYPE: Parameter("notificationType"),
+ no.NOTIFICATION_DATA: Parameter("notificationData"),
+ no.MD5: Parameter("md5"),
+ },
+ Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")).And(
+ no.NOTIFICATION_UID == Parameter("uid")),
+ Return=no.MODIFIED
+ )
+
+
+ @inlineCallbacks
+ def setData(self, uid, notificationtype, notificationdata, inserting=False):
+ """
+ Set the object resource data and update the cached metadata.
+ """
+
+ notificationtext = json.dumps(notificationdata)
+ self._notificationType = notificationtype
+ self._md5 = hashlib.md5(notificationtext).hexdigest()
+ self._size = len(notificationtext)
+ if inserting:
+ rows = yield self._newNotificationQuery.on(
+ self._txn, homeID=self._home._resourceID, uid=uid,
+ notificationType=json.dumps(self._notificationType),
+ notificationData=notificationtext, md5=self._md5
+ )
+ self._resourceID, self._created, self._modified = rows[0]
+ self._loadPropertyStore()
+ else:
+ rows = yield self._updateNotificationQuery.on(
+ self._txn, homeID=self._home._resourceID, uid=uid,
+ notificationType=json.dumps(self._notificationType),
+ notificationData=notificationtext, md5=self._md5
+ )
+ self._modified = rows[0][0]
+ self._notificationData = notificationdata
+
+ _notificationDataFromID = Select(
+ [_objectSchema.NOTIFICATION_DATA], From=_objectSchema,
+ Where=_objectSchema.RESOURCE_ID == Parameter("resourceID"))
+
+
+ @inlineCallbacks
+ def notificationData(self):
+ if self._notificationData is None:
+ self._notificationData = (yield self._notificationDataFromID.on(self._txn, resourceID=self._resourceID))[0][0]
+ try:
+ self._notificationData = json.loads(self._notificationData)
+ except ValueError:
+ pass
+ if isinstance(self._notificationData, unicode):
+ self._notificationData = self._notificationData.encode("utf-8")
+ returnValue(self._notificationData)
+
+
+ def contentType(self):
+ """
+ The content type of NotificationObjects is text/xml.
+ """
+ return MimeType.fromString("text/xml")
+
+
+ def md5(self):
+ return self._md5
+
+
+ def size(self):
+ return self._size
+
+
+ def notificationType(self):
+ return self._notificationType
+
+
+ def created(self):
+ return datetimeMktime(parseSQLTimestamp(self._created))
+
+
+ def modified(self):
+ return datetimeMktime(parseSQLTimestamp(self._modified))
</ins></span></pre></div>
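<p>The NotificationCollection/NotificationObject pair above stores notification payloads as JSON and leaves XML rendering to the app layer; homes are looked up (or created) per owner UID and status, and object names are simply the UID plus ".xml". A hedged sketch of writing and listing notifications follows; the transaction, owner UID and payload shape are assumptions for illustration only.</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.common.datastore.sql_notification import NotificationCollection

@inlineCallbacks
def recordExampleNotification(txn, ownerUID):
    # Look up, or create, the notification home for this user.
    notifications = yield NotificationCollection.notificationsWithUID(txn, ownerUID, create=True)
    # Both the type and the data are JSON-serialized before hitting the NOTIFICATION table.
    yield notifications.writeNotificationObject(
        "example-uid",
        {"notification-type": "invite-notification"},
        {"summary": "Example payload"},
    )
    # Returns the notification UIDs currently in the collection.
    uids = yield notifications.listNotificationObjects()
    returnValue(uids)
</pre>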
<a id="CalendarServertrunktxdavcommondatastoresql_schemacurrentoracledialectsql"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -29,9 +29,10 @@
</span><span class="cx">
</span><span class="cx"> create table CALENDAR_HOME (
</span><span class="cx"> "RESOURCE_ID" integer primary key,
</span><del>- "OWNER_UID" nvarchar2(255) unique,
</del><ins>+ "OWNER_UID" nvarchar2(255),
</ins><span class="cx"> "STATUS" integer default 0 not null,
</span><del>- "DATAVERSION" integer default 0 not null
</del><ins>+ "DATAVERSION" integer default 0 not null,
+ unique ("OWNER_UID", "STATUS")
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table HOME_STATUS (
</span><span class="lines">@@ -42,6 +43,8 @@
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
</span><span class="cx"> insert into HOME_STATUS (DESCRIPTION, ID) values ('purging', 2);
</span><ins>+insert into HOME_STATUS (DESCRIPTION, ID) values ('migrating', 3);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('disabled', 4);
</ins><span class="cx"> create table CALENDAR (
</span><span class="cx"> "RESOURCE_ID" integer primary key
</span><span class="cx"> );
</span><span class="lines">@@ -68,11 +71,20 @@
</span><span class="cx"> "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
</span><span class="cx"> );
</span><span class="cx">
</span><ins>+create table CALENDAR_MIGRATION (
+ "CALENDAR_HOME_RESOURCE_ID" integer references CALENDAR_HOME on delete cascade,
+ "REMOTE_RESOURCE_ID" integer not null,
+ "LOCAL_RESOURCE_ID" integer references CALENDAR on delete cascade,
+ "LAST_SYNC_TOKEN" nvarchar2(255),
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "REMOTE_RESOURCE_ID")
+);
+
</ins><span class="cx"> create table NOTIFICATION_HOME (
</span><span class="cx"> "RESOURCE_ID" integer primary key,
</span><del>- "OWNER_UID" nvarchar2(255) unique,
</del><ins>+ "OWNER_UID" nvarchar2(255),
</ins><span class="cx"> "STATUS" integer default 0 not null,
</span><del>- "DATAVERSION" integer default 0 not null
</del><ins>+ "DATAVERSION" integer default 0 not null,
+ unique ("OWNER_UID", "STATUS")
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION (
</span><span class="lines">@@ -90,11 +102,11 @@
</span><span class="cx"> create table CALENDAR_BIND (
</span><span class="cx"> "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
</span><span class="cx"> "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
</span><del>- "EXTERNAL_ID" integer default null,
</del><span class="cx"> "CALENDAR_RESOURCE_NAME" nvarchar2(255),
</span><span class="cx"> "BIND_MODE" integer not null,
</span><span class="cx"> "BIND_STATUS" integer not null,
</span><span class="cx"> "BIND_REVISION" integer default 0 not null,
</span><ins>+ "BIND_UID" nvarchar2(36) default null,
</ins><span class="cx"> "MESSAGE" nclob,
</span><span class="cx"> "TRANSP" integer default 0 not null,
</span><span class="cx"> "ALARM_VEVENT_TIMED" nclob default null,
</span><span class="lines">@@ -208,6 +220,13 @@
</span><span class="cx"> primary key ("TIME_RANGE_INSTANCE_ID", "USER_ID")
</span><span class="cx"> );
</span><span class="cx">
</span><ins>+create table CALENDAR_OBJECT_MIGRATION (
+ "CALENDAR_HOME_RESOURCE_ID" integer references CALENDAR_HOME on delete cascade,
+ "REMOTE_RESOURCE_ID" integer not null,
+ "LOCAL_RESOURCE_ID" integer references CALENDAR_OBJECT on delete cascade,
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "REMOTE_RESOURCE_ID")
+);
+
</ins><span class="cx"> create table ATTACHMENT (
</span><span class="cx"> "ATTACHMENT_ID" integer primary key,
</span><span class="cx"> "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
</span><span class="lines">@@ -228,6 +247,13 @@
</span><span class="cx"> unique ("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
</span><span class="cx"> );
</span><span class="cx">
</span><ins>+create table ATTACHMENT_MIGRATION (
+ "CALENDAR_HOME_RESOURCE_ID" integer references CALENDAR_HOME on delete cascade,
+ "REMOTE_RESOURCE_ID" integer not null,
+ "LOCAL_RESOURCE_ID" integer references ATTACHMENT on delete cascade,
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "REMOTE_RESOURCE_ID")
+);
+
</ins><span class="cx"> create table RESOURCE_PROPERTY (
</span><span class="cx"> "RESOURCE_ID" integer not null,
</span><span class="cx"> "NAME" nvarchar2(255),
</span><span class="lines">@@ -239,9 +265,10 @@
</span><span class="cx"> create table ADDRESSBOOK_HOME (
</span><span class="cx"> "RESOURCE_ID" integer primary key,
</span><span class="cx"> "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
</span><del>- "OWNER_UID" nvarchar2(255) unique,
</del><ins>+ "OWNER_UID" nvarchar2(255),
</ins><span class="cx"> "STATUS" integer default 0 not null,
</span><del>- "DATAVERSION" integer default 0 not null
</del><ins>+ "DATAVERSION" integer default 0 not null,
+ unique ("OWNER_UID", "STATUS")
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table ADDRESSBOOK_HOME_METADATA (
</span><span class="lines">@@ -254,11 +281,11 @@
</span><span class="cx"> create table SHARED_ADDRESSBOOK_BIND (
</span><span class="cx"> "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
</span><span class="cx"> "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
</span><del>- "EXTERNAL_ID" integer default null,
</del><span class="cx"> "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
</span><span class="cx"> "BIND_MODE" integer not null,
</span><span class="cx"> "BIND_STATUS" integer not null,
</span><span class="cx"> "BIND_REVISION" integer default 0 not null,
</span><ins>+ "BIND_UID" nvarchar2(36) default null,
</ins><span class="cx"> "MESSAGE" nclob,
</span><span class="cx"> primary key ("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"),
</span><span class="cx"> unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
</span><span class="lines">@@ -308,11 +335,11 @@
</span><span class="cx"> create table SHARED_GROUP_BIND (
</span><span class="cx"> "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
</span><span class="cx"> "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
</span><del>- "EXTERNAL_ID" integer default null,
</del><span class="cx"> "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
</span><span class="cx"> "BIND_MODE" integer not null,
</span><span class="cx"> "BIND_STATUS" integer not null,
</span><span class="cx"> "BIND_REVISION" integer default 0 not null,
</span><ins>+ "BIND_UID" nvarchar2(36) default null,
</ins><span class="cx"> "MESSAGE" nclob,
</span><span class="cx"> primary key ("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"),
</span><span class="cx"> unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
</span><span class="lines">@@ -607,7 +634,7 @@
</span><span class="cx"> "VALUE" nvarchar2(255)
</span><span class="cx"> );
</span><span class="cx">
</span><del>-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '51');
</del><ins>+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '52');
</ins><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '6');
</span><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
</span><span class="cx"> insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
</span><span class="lines">@@ -624,6 +651,10 @@
</span><span class="cx"> DEFAULT_POLLS
</span><span class="cx"> );
</span><span class="cx">
</span><ins>+create index CALENDAR_MIGRATION_LO_0525c72b on CALENDAR_MIGRATION (
+ LOCAL_RESOURCE_ID
+);
+
</ins><span class="cx"> create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
</span><span class="cx"> NOTIFICATION_HOME_RESOURCE_ID
</span><span class="cx"> );
</span><span class="lines">@@ -659,6 +690,15 @@
</span><span class="cx"> CALENDAR_OBJECT_RESOURCE_ID
</span><span class="cx"> );
</span><span class="cx">
</span><ins>+create index CALENDAR_OBJECT_MIGRA_0502cbef on CALENDAR_OBJECT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID,
+ LOCAL_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_MIGRA_3577efd9 on CALENDAR_OBJECT_MIGRATION (
+ LOCAL_RESOURCE_ID
+);
+
</ins><span class="cx"> create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
</span><span class="cx"> CALENDAR_HOME_RESOURCE_ID
</span><span class="cx"> );
</span><span class="lines">@@ -671,6 +711,15 @@
</span><span class="cx"> CALENDAR_OBJECT_RESOURCE_ID
</span><span class="cx"> );
</span><span class="cx">
</span><ins>+create index ATTACHMENT_MIGRATION__804bf85e on ATTACHMENT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID,
+ LOCAL_RESOURCE_ID
+);
+
+create index ATTACHMENT_MIGRATION__816947fe on ATTACHMENT_MIGRATION (
+ LOCAL_RESOURCE_ID
+);
+
</ins><span class="cx"> create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
</span><span class="cx"> OWNER_HOME_RESOURCE_ID
</span><span class="cx"> );
</span></span></pre></div>
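<p>The schema changes above (repeated for the default dialect in current.sql below) relax the per-home OWNER_UID uniqueness to a composite (OWNER_UID, STATUS) key, add the CALENDAR/CALENDAR_OBJECT/ATTACHMENT migration linkage tables, and replace the integer EXTERNAL_ID bind columns with a 36-character BIND_UID. With the composite key, a home lookup has to constrain both columns; below is a hedged DAL sketch in the style of sql_notification.py, where the CALENDAR_HOME column attribute names are assumed to follow the schema.</p>
<pre>
from twext.enterprise.dal.syntax import Select, Parameter
from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_NORMAL

def normalCalendarHomeQuery():
    ch = schema.CALENDAR_HOME
    # One user may now own several home rows (normal, external, migrating, disabled),
    # so the status must be part of the lookup.
    return Select(
        [ch.RESOURCE_ID],
        From=ch,
        Where=(ch.OWNER_UID == Parameter("uid")).And(ch.STATUS == _HOME_STATUS_NORMAL),
    )

# Assumed usage inside a transaction: rows = yield normalCalendarHomeQuery().on(txn, uid=ownerUID)
</pre>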
<a id="CalendarServertrunktxdavcommondatastoresql_schemacurrentsql"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -70,9 +70,11 @@
</span><span class="cx">
</span><span class="cx"> create table CALENDAR_HOME (
</span><span class="cx"> RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><del>- OWNER_UID varchar(255) not null unique, -- implicit index
</del><ins>+ OWNER_UID varchar(255) not null,                  -- implicit index
</ins><span class="cx"> STATUS integer default 0 not null, -- enum HOME_STATUS
</span><del>- DATAVERSION integer default 0 not null
</del><ins>+ DATAVERSION integer default 0 not null,
+
+ unique (OWNER_UID, STATUS)        -- implicit index
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> -- Enumeration of statuses
</span><span class="lines">@@ -85,6 +87,8 @@
</span><span class="cx"> insert into HOME_STATUS values (0, 'normal' );
</span><span class="cx"> insert into HOME_STATUS values (1, 'external');
</span><span class="cx"> insert into HOME_STATUS values (2, 'purging');
</span><ins>+insert into HOME_STATUS values (3, 'migrating');
+insert into HOME_STATUS values (4, 'disabled');
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> --------------
</span><span class="lines">@@ -135,15 +139,34 @@
</span><span class="cx"> );
</span><span class="cx">
</span><span class="cx">
</span><ins>+------------------------
+-- Calendar Migration --
+------------------------
+
+create table CALENDAR_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID                integer references CALENDAR_HOME on delete cascade,
+ REMOTE_RESOURCE_ID                        integer not null,
+ LOCAL_RESOURCE_ID                                integer        references CALENDAR on delete cascade,
+ LAST_SYNC_TOKEN                                varchar(255),
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_MIGRATION_LOCAL_RESOURCE_ID on
+ CALENDAR_MIGRATION(LOCAL_RESOURCE_ID);
+
+
</ins><span class="cx"> ---------------------------
</span><span class="cx"> -- Sharing Notifications --
</span><span class="cx"> ---------------------------
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION_HOME (
</span><span class="cx"> RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><del>- OWNER_UID varchar(255) not null unique, -- implicit index
</del><ins>+ OWNER_UID varchar(255) not null,         -- implicit index
</ins><span class="cx"> STATUS integer default 0 not null, -- enum HOME_STATUS
</span><del>- DATAVERSION integer default 0 not null
</del><ins>+ DATAVERSION integer default 0 not null,
+
+ unique (OWNER_UID, STATUS)        -- implicit index
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx"> create table NOTIFICATION (
</span><span class="lines">@@ -172,11 +195,11 @@
</span><span class="cx"> create table CALENDAR_BIND (
</span><span class="cx"> CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
</span><span class="cx"> CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
</span><del>- EXTERNAL_ID integer default null,
</del><span class="cx"> CALENDAR_RESOURCE_NAME varchar(255) not null,
</span><span class="cx"> BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
</span><span class="cx"> BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
</span><span class="cx"> BIND_REVISION integer default 0 not null,
</span><ins>+ BIND_UID varchar(36) default null,
</ins><span class="cx"> MESSAGE text,
</span><span class="cx"> TRANSP integer default 0 not null, -- enum CALENDAR_TRANSP
</span><span class="cx"> ALARM_VEVENT_TIMED text default null,
</span><span class="lines">@@ -363,6 +386,24 @@
</span><span class="cx"> );
</span><span class="cx">
</span><span class="cx">
</span><ins>+-------------------------------
+-- Calendar Object Migration --
+-------------------------------
+
+create table CALENDAR_OBJECT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID                integer references CALENDAR_HOME on delete cascade,
+ REMOTE_RESOURCE_ID                        integer not null,
+ LOCAL_RESOURCE_ID                                integer        references CALENDAR_OBJECT on delete cascade,
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_OBJECT_MIGRATION_HOME_LOCAL on
+ CALENDAR_OBJECT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index CALENDAR_OBJECT_MIGRATION_LOCAL_RESOURCE_ID on
+ CALENDAR_OBJECT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
</ins><span class="cx"> ----------------
</span><span class="cx"> -- Attachment --
</span><span class="cx"> ----------------
</span><span class="lines">@@ -400,6 +441,24 @@
</span><span class="cx"> create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
</span><span class="cx"> ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
</span><span class="cx">
</span><ins>+-----------------------------------
+-- Calendar Attachment Migration --
+-----------------------------------
+
+create table ATTACHMENT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID                integer references CALENDAR_HOME on delete cascade,
+ REMOTE_RESOURCE_ID                        integer not null,
+ LOCAL_RESOURCE_ID                                integer        references ATTACHMENT on delete cascade,
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index ATTACHMENT_MIGRATION_HOME_LOCAL on
+ ATTACHMENT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index ATTACHMENT_MIGRATION_LOCAL_RESOURCE_ID on
+ ATTACHMENT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
</ins><span class="cx"> -----------------------
</span><span class="cx"> -- Resource Property --
</span><span class="cx"> -----------------------
</span><span class="lines">@@ -421,9 +480,11 @@
</span><span class="cx"> create table ADDRESSBOOK_HOME (
</span><span class="cx"> RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
</span><span class="cx"> ADDRESSBOOK_PROPERTY_STORE_ID integer default nextval('RESOURCE_ID_SEQ') not null, -- implicit index
</span><del>- OWNER_UID varchar(255) not null unique, -- implicit index
</del><ins>+ OWNER_UID varchar(255) not null,
</ins><span class="cx"> STATUS integer default 0 not null, -- enum HOME_STATUS
</span><del>- DATAVERSION integer default 0 not null
</del><ins>+ DATAVERSION integer default 0 not null,
+
+ unique (OWNER_UID, STATUS)        -- implicit index
</ins><span class="cx"> );
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -448,11 +509,11 @@
</span><span class="cx"> create table SHARED_ADDRESSBOOK_BIND (
</span><span class="cx"> ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
</span><span class="cx"> OWNER_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
</span><del>- EXTERNAL_ID integer default null,
</del><span class="cx"> ADDRESSBOOK_RESOURCE_NAME varchar(255) not null,
</span><span class="cx"> BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
</span><span class="cx"> BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
</span><span class="cx"> BIND_REVISION integer default 0 not null,
</span><ins>+ BIND_UID varchar(36) default null,
</ins><span class="cx"> MESSAGE text, -- FIXME: xml?
</span><span class="cx">
</span><span class="cx"> primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
</span><span class="lines">@@ -549,11 +610,11 @@
</span><span class="cx"> create table SHARED_GROUP_BIND (
</span><span class="cx"> ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
</span><span class="cx"> GROUP_RESOURCE_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade,
</span><del>- EXTERNAL_ID integer default null,
</del><span class="cx"> GROUP_ADDRESSBOOK_NAME varchar(255) not null,
</span><span class="cx"> BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
</span><span class="cx"> BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
</span><span class="cx"> BIND_REVISION integer default 0 not null,
</span><ins>+ BIND_UID varchar(36) default null,
</ins><span class="cx"> MESSAGE text, -- FIXME: xml?
</span><span class="cx">
</span><span class="cx"> primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
</span><span class="lines">@@ -873,7 +934,7 @@
</span><span class="cx"> DELEGATOR varchar(255) not null,
</span><span class="cx"> GROUP_ID integer not null references GROUPS on delete cascade,
</span><span class="cx"> READ_WRITE integer not null, -- 1 = ReadWrite, 0 = ReadOnly
</span><del>- IS_EXTERNAL integer not null, -- 1 = ReadWrite, 0 = ReadOnly
</del><ins>+ IS_EXTERNAL integer not null, -- 1 = External, 0 = Internal
</ins><span class="cx">
</span><span class="cx"> primary key (DELEGATOR, READ_WRITE, GROUP_ID)
</span><span class="cx"> );
</span><span class="lines">@@ -1150,7 +1211,7 @@
</span><span class="cx"> VALUE varchar(255)
</span><span class="cx"> );
</span><span class="cx">
</span><del>-insert into CALENDARSERVER values ('VERSION', '51');
</del><ins>+insert into CALENDARSERVER values ('VERSION', '52');
</ins><span class="cx"> insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '6');
</span><span class="cx"> insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
</span><span class="cx"> insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_schemaoldoracledialectv51sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaoldoracledialectv51sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/old/oracle-dialect/v51.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,939 @@
</span><ins>+create sequence RESOURCE_ID_SEQ;
+create sequence JOB_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+ "HOSTNAME" nvarchar2(255),
+ "PID" integer not null,
+ "PORT" integer not null,
+ "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null,
+ primary key ("HOSTNAME", "PORT")
+);
+
+create table NAMED_LOCK (
+ "LOCK_NAME" nvarchar2(255) primary key
+);
+
+create table JOB (
+ "JOB_ID" integer primary key,
+ "WORK_TYPE" nvarchar2(255),
+ "PRIORITY" integer default 0,
+ "WEIGHT" integer default 0,
+ "NOT_BEFORE" timestamp not null,
+ "ASSIGNED" timestamp default null,
+ "OVERDUE" timestamp default null,
+ "FAILED" integer default 0
+);
+
+create table CALENDAR_HOME (
+ "RESOURCE_ID" integer primary key,
+ "OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table HOME_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('purging', 2);
+create table CALENDAR (
+ "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+ "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+ "QUOTA_USED_BYTES" integer default 0 not null,
+ "DEFAULT_EVENTS" integer default null references CALENDAR on delete set null,
+ "DEFAULT_TASKS" integer default null references CALENDAR on delete set null,
+ "DEFAULT_POLLS" integer default null references CALENDAR on delete set null,
+ "ALARM_VEVENT_TIMED" nclob default null,
+ "ALARM_VEVENT_ALLDAY" nclob default null,
+ "ALARM_VTODO_TIMED" nclob default null,
+ "ALARM_VTODO_ALLDAY" nclob default null,
+ "AVAILABILITY" nclob default null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+ "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+ "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+ "RESOURCE_ID" integer primary key,
+ "OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table NOTIFICATION (
+ "RESOURCE_ID" integer primary key,
+ "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+ "NOTIFICATION_UID" nvarchar2(255),
+ "NOTIFICATION_TYPE" nvarchar2(255),
+ "NOTIFICATION_DATA" nclob,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique ("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "EXTERNAL_ID" integer default null,
+ "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ "TRANSP" integer default 0 not null,
+ "ALARM_VEVENT_TIMED" nclob default null,
+ "ALARM_VEVENT_ALLDAY" nclob default null,
+ "ALARM_VTODO_TIMED" nclob default null,
+ "ALARM_VTODO_ALLDAY" nclob default null,
+ "TIMEZONE" nclob default null,
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"),
+ unique ("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('indirect', 4);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('group', 5);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('group_read', 6);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('group_write', 7);
+create table CALENDAR_BIND_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('deleted', 4);
+create table CALENDAR_TRANSP (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+ "RESOURCE_ID" integer primary key,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob,
+ "ICALENDAR_UID" nvarchar2(255),
+ "ICALENDAR_TYPE" nvarchar2(255),
+ "ATTACHMENTS_MODE" integer default 0 not null,
+ "DROPBOX_ID" nvarchar2(255),
+ "ORGANIZER" nvarchar2(255),
+ "RECURRANCE_MIN" date,
+ "RECURRANCE_MAX" date,
+ "ACCESS" integer default 0 not null,
+ "SCHEDULE_OBJECT" integer default 0,
+ "SCHEDULE_TAG" nvarchar2(36) default null,
+ "SCHEDULE_ETAGS" nclob default null,
+ "PRIVATE_COMMENTS" integer default 0 not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "DATAVERSION" integer default 0 not null,
+ unique ("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJ_ATTACHMENTS_MODE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+ "INSTANCE_ID" integer primary key,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "FLOATING" integer not null,
+ "START_DATE" timestamp not null,
+ "END_DATE" timestamp not null,
+ "FBTYPE" integer not null,
+ "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table PERUSER (
+ "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+ "USER_ID" nvarchar2(255),
+ "TRANSPARENT" integer not null,
+ "ADJUSTED_START_DATE" timestamp default null,
+ "ADJUSTED_END_DATE" timestamp default null,
+ primary key ("TIME_RANGE_INSTANCE_ID", "USER_ID")
+);
+
+create table ATTACHMENT (
+ "ATTACHMENT_ID" integer primary key,
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "DROPBOX_ID" nvarchar2(255),
+ "CONTENT_TYPE" nvarchar2(255),
+ "SIZE" integer not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "PATH" nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+ "ATTACHMENT_ID" integer not null references ATTACHMENT on delete cascade,
+ "MANAGED_ID" nvarchar2(255),
+ "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ primary key ("ATTACHMENT_ID", "CALENDAR_OBJECT_RESOURCE_ID"),
+ unique ("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
+);
+
+create table RESOURCE_PROPERTY (
+ "RESOURCE_ID" integer not null,
+ "NAME" nvarchar2(255),
+ "VALUE" nclob,
+ "VIEWER_UID" nvarchar2(255),
+ primary key ("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+ "RESOURCE_ID" integer primary key,
+ "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
+ "OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+ "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+ "QUOTA_USED_BYTES" integer default 0 not null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "EXTERNAL_ID" integer default null,
+ "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ primary key ("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"),
+ unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+ "RESOURCE_ID" integer primary key,
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "VCARD_TEXT" nclob,
+ "VCARD_UID" nvarchar2(255),
+ "KIND" integer not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "DATAVERSION" integer default 0 not null,
+ unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME"),
+ unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID")
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+ "GROUP_ID" integer not null,
+ "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "MEMBER_ID" integer not null,
+ "REVISION" integer not null,
+ "REMOVED" integer default 0 not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ primary key ("GROUP_ID", "MEMBER_ID", "REVISION")
+);
+
+create table ABO_FOREIGN_MEMBERS (
+ "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "MEMBER_ADDRESS" nvarchar2(255),
+ primary key ("GROUP_ID", "MEMBER_ADDRESS")
+);
+
+create table SHARED_GROUP_BIND (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "EXTERNAL_ID" integer default null,
+ "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ primary key ("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"),
+ unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+ "CALENDAR_NAME" nvarchar2(255) default null,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique ("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID", "CALENDAR_NAME", "RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "OWNER_HOME_RESOURCE_ID" integer references ADDRESSBOOK_HOME,
+ "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+ "OBJECT_RESOURCE_ID" integer default 0,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique ("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID", "ADDRESSBOOK_NAME", "RESOURCE_NAME")
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+ "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique ("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+ "TOKEN" nvarchar2(255),
+ "RESOURCE_KEY" nvarchar2(255),
+ "MODIFIED" integer not null,
+ "SUBSCRIBER_GUID" nvarchar2(255),
+ "USER_AGENT" nvarchar2(255) default null,
+ "IP_ADDR" nvarchar2(255) default null,
+ primary key ("TOKEN", "RESOURCE_KEY")
+);
+
+create table IMIP_TOKENS (
+ "TOKEN" nvarchar2(255),
+ "ORGANIZER" nvarchar2(255),
+ "ATTENDEE" nvarchar2(255),
+ "ICALUID" nvarchar2(255),
+ "ACCESSED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ primary key ("ORGANIZER", "ATTENDEE", "ICALUID")
+);
+
+create table IMIP_INVITATION_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "FROM_ADDR" nvarchar2(255),
+ "TO_ADDR" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob
+);
+
+create table IMIP_POLLING_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB
+);
+
+create table IMIP_REPLY_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "ORGANIZER" nvarchar2(255),
+ "ATTENDEE" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "PUSH_ID" nvarchar2(255),
+ "PUSH_PRIORITY" integer not null
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB
+);
+
+create table GROUP_REFRESH_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "GROUP_UID" nvarchar2(255)
+);
+
+create table GROUP_DELEGATE_CHANGES_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "DELEGATOR_UID" nvarchar2(255),
+ "READ_DELEGATE_UID" nvarchar2(255),
+ "WRITE_DELEGATE_UID" nvarchar2(255)
+);
+
+create table GROUPS (
+ "GROUP_ID" integer primary key,
+ "NAME" nvarchar2(255),
+ "GROUP_UID" nvarchar2(255) unique,
+ "MEMBERSHIP_HASH" nvarchar2(255),
+ "EXTANT" integer default 1,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table GROUP_MEMBERSHIP (
+ "GROUP_ID" integer not null references GROUPS on delete cascade,
+ "MEMBER_UID" nvarchar2(255),
+ primary key ("GROUP_ID", "MEMBER_UID")
+);
+
+create table GROUP_ATTENDEE_RECONCILE_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "GROUP_ID" integer not null references GROUPS on delete cascade
+);
+
+create table GROUP_ATTENDEE (
+ "GROUP_ID" integer not null references GROUPS on delete cascade,
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "MEMBERSHIP_HASH" nvarchar2(255),
+ primary key ("GROUP_ID", "RESOURCE_ID")
+);
+
+create table GROUP_SHAREE_RECONCILE_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "CALENDAR_ID" integer not null references CALENDAR on delete cascade,
+ "GROUP_ID" integer not null references GROUPS on delete cascade
+);
+
+create table GROUP_SHAREE (
+ "GROUP_ID" integer not null references GROUPS on delete cascade,
+ "CALENDAR_ID" integer not null references CALENDAR on delete cascade,
+ "GROUP_BIND_MODE" integer not null,
+ "MEMBERSHIP_HASH" nvarchar2(255),
+ primary key ("GROUP_ID", "CALENDAR_ID")
+);
+
+create table DELEGATES (
+ "DELEGATOR" nvarchar2(255),
+ "DELEGATE" nvarchar2(255),
+ "READ_WRITE" integer not null,
+ primary key ("DELEGATOR", "READ_WRITE", "DELEGATE")
+);
+
+create table DELEGATE_GROUPS (
+ "DELEGATOR" nvarchar2(255),
+ "GROUP_ID" integer not null references GROUPS on delete cascade,
+ "READ_WRITE" integer not null,
+ "IS_EXTERNAL" integer not null,
+ primary key ("DELEGATOR", "READ_WRITE", "GROUP_ID")
+);
+
+create table EXTERNAL_DELEGATE_GROUPS (
+ "DELEGATOR" nvarchar2(255) primary key,
+ "GROUP_UID_READ" nvarchar2(255),
+ "GROUP_UID_WRITE" nvarchar2(255)
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table CALENDAR_OBJECT_UPGRADE_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table FIND_MIN_VALID_REVISION_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB
+);
+
+create table REVISION_CLEANUP_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB
+);
+
+create table INBOX_CLEANUP_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB
+);
+
+create table CLEANUP_ONE_INBOX_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "HOME_ID" integer not null unique references CALENDAR_HOME on delete cascade
+);
+
+create table SCHEDULE_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "ICALENDAR_UID" nvarchar2(255),
+ "WORK_TYPE" nvarchar2(255)
+);
+
+create table SCHEDULE_REFRESH_WORK (
+ "WORK_ID" integer primary key references SCHEDULE_WORK on delete cascade,
+ "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "ATTENDEE_COUNT" integer
+);
+
+create table SCHEDULE_REFRESH_ATTENDEES (
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "ATTENDEE" nvarchar2(255),
+ primary key ("RESOURCE_ID", "ATTENDEE")
+);
+
+create table SCHEDULE_AUTO_REPLY_WORK (
+ "WORK_ID" integer primary key references SCHEDULE_WORK on delete cascade,
+ "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "PARTSTAT" nvarchar2(255)
+);
+
+create table SCHEDULE_ORGANIZER_WORK (
+ "WORK_ID" integer primary key references SCHEDULE_WORK on delete cascade,
+ "SCHEDULE_ACTION" integer not null,
+ "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+ "RESOURCE_ID" integer,
+ "ICALENDAR_TEXT_OLD" nclob,
+ "ICALENDAR_TEXT_NEW" nclob,
+ "ATTENDEE_COUNT" integer,
+ "SMART_MERGE" integer
+);
+
+create table SCHEDULE_ACTION (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('create', 0);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('modify', 1);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('modify-cancelled', 2);
+insert into SCHEDULE_ACTION (DESCRIPTION, ID) values ('remove', 3);
+create table SCHEDULE_ORGANIZER_SEND_WORK (
+ "WORK_ID" integer primary key references SCHEDULE_WORK on delete cascade,
+ "SCHEDULE_ACTION" integer not null,
+ "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+ "RESOURCE_ID" integer,
+ "ATTENDEE" nvarchar2(255),
+ "ITIP_MSG" nclob,
+ "NO_REFRESH" integer
+);
+
+create table SCHEDULE_REPLY_WORK (
+ "WORK_ID" integer primary key references SCHEDULE_WORK on delete cascade,
+ "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade,
+ "RESOURCE_ID" integer,
+ "ITIP_MSG" nclob
+);
+
+create table PRINCIPAL_PURGE_POLLING_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB
+);
+
+create table PRINCIPAL_PURGE_CHECK_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "UID" nvarchar2(255)
+);
+
+create table PRINCIPAL_PURGE_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "UID" nvarchar2(255)
+);
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade
+);
+
+create table CALENDARSERVER (
+ "NAME" nvarchar2(255) primary key,
+ "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '51');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '6');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER (NAME, VALUE) values ('MIN-VALID-REVISION', '1');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+ DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+ DEFAULT_TASKS
+);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+ DEFAULT_POLLS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+ NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+ CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+ CALENDAR_RESOURCE_ID,
+ ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_c4dc619c on CALENDAR_OBJECT (
+ CALENDAR_RESOURCE_ID,
+ RECURRANCE_MAX,
+ RECURRANCE_MIN
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+ ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+ DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+ CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+ CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+ CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_DROPBOX_ID_5073cf23 on ATTACHMENT (
+ DROPBOX_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+ CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+ OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+ ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+ MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+ ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+ GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_RESOURCE_ID,
+ RESOURCE_NAME,
+ DELETED,
+ REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_RESOURCE_ID,
+ REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_550b1c56 on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+ OWNER_HOME_RESOURCE_ID,
+ RESOURCE_NAME,
+ DELETED,
+ REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+ OWNER_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+ NOTIFICATION_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+ RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+ TOKEN
+);
+
+create index IMIP_INVITATION_WORK__586d064c on IMIP_INVITATION_WORK (
+ JOB_ID
+);
+
+create index IMIP_POLLING_WORK_JOB_d5535891 on IMIP_POLLING_WORK (
+ JOB_ID
+);
+
+create index IMIP_REPLY_WORK_JOB_I_bf4ae73e on IMIP_REPLY_WORK (
+ JOB_ID
+);
+
+create index PUSH_NOTIFICATION_WOR_8bbab117 on PUSH_NOTIFICATION_WORK (
+ JOB_ID
+);
+
+create index PUSH_NOTIFICATION_WOR_3a3ee588 on PUSH_NOTIFICATION_WORK (
+ PUSH_ID
+);
+
+create index GROUP_CACHER_POLLING__6eb3151c on GROUP_CACHER_POLLING_WORK (
+ JOB_ID
+);
+
+create index GROUP_REFRESH_WORK_JO_717ede20 on GROUP_REFRESH_WORK (
+ JOB_ID
+);
+
+create index GROUP_REFRESH_WORK_GR_0325f3a8 on GROUP_REFRESH_WORK (
+ GROUP_UID
+);
+
+create index GROUP_DELEGATE_CHANGE_8bf9e6d8 on GROUP_DELEGATE_CHANGES_WORK (
+ JOB_ID
+);
+
+create index GROUP_DELEGATE_CHANGE_d8f7af69 on GROUP_DELEGATE_CHANGES_WORK (
+ DELEGATOR_UID
+);
+
+create index GROUPS_GROUP_UID_b35cce23 on GROUPS (
+ GROUP_UID
+);
+
+create index GROUP_MEMBERSHIP_MEMB_0ca508e8 on GROUP_MEMBERSHIP (
+ MEMBER_UID
+);
+
+create index GROUP_ATTENDEE_RECONC_da73d3c2 on GROUP_ATTENDEE_RECONCILE_WORK (
+ JOB_ID
+);
+
+create index GROUP_ATTENDEE_RECONC_b894ee7a on GROUP_ATTENDEE_RECONCILE_WORK (
+ RESOURCE_ID
+);
+
+create index GROUP_ATTENDEE_RECONC_5eabc549 on GROUP_ATTENDEE_RECONCILE_WORK (
+ GROUP_ID
+);
+
+create index GROUP_ATTENDEE_RESOUR_855124dc on GROUP_ATTENDEE (
+ RESOURCE_ID
+);
+
+create index GROUP_SHAREE_RECONCIL_9aad0858 on GROUP_SHAREE_RECONCILE_WORK (
+ JOB_ID
+);
+
+create index GROUP_SHAREE_RECONCIL_4dc60f78 on GROUP_SHAREE_RECONCILE_WORK (
+ CALENDAR_ID
+);
+
+create index GROUP_SHAREE_RECONCIL_1d14c921 on GROUP_SHAREE_RECONCILE_WORK (
+ GROUP_ID
+);
+
+create index GROUP_SHAREE_CALENDAR_28a88850 on GROUP_SHAREE (
+ CALENDAR_ID
+);
+
+create index DELEGATE_TO_DELEGATOR_5e149b11 on DELEGATES (
+ DELEGATE,
+ READ_WRITE,
+ DELEGATOR
+);
+
+create index DELEGATE_GROUPS_GROUP_25117446 on DELEGATE_GROUPS (
+ GROUP_ID
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+ RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_SPLIT_33603b72 on CALENDAR_OBJECT_SPLITTER_WORK (
+ JOB_ID
+);
+
+create index CALENDAR_OBJECT_UPGRA_a5c181eb on CALENDAR_OBJECT_UPGRADE_WORK (
+ RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_UPGRA_39d6f8f9 on CALENDAR_OBJECT_UPGRADE_WORK (
+ JOB_ID
+);
+
+create index FIND_MIN_VALID_REVISI_78d17400 on FIND_MIN_VALID_REVISION_WORK (
+ JOB_ID
+);
+
+create index REVISION_CLEANUP_WORK_eb062686 on REVISION_CLEANUP_WORK (
+ JOB_ID
+);
+
+create index INBOX_CLEANUP_WORK_JO_799132bd on INBOX_CLEANUP_WORK (
+ JOB_ID
+);
+
+create index CLEANUP_ONE_INBOX_WOR_375dac36 on CLEANUP_ONE_INBOX_WORK (
+ JOB_ID
+);
+
+create index SCHEDULE_WORK_JOB_ID_65e810ee on SCHEDULE_WORK (
+ JOB_ID
+);
+
+create index SCHEDULE_WORK_ICALEND_089f33dc on SCHEDULE_WORK (
+ ICALENDAR_UID
+);
+
+create index SCHEDULE_REFRESH_WORK_26084c7b on SCHEDULE_REFRESH_WORK (
+ HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REFRESH_WORK_989efe54 on SCHEDULE_REFRESH_WORK (
+ RESOURCE_ID
+);
+
+create index SCHEDULE_REFRESH_ATTE_83053b91 on SCHEDULE_REFRESH_ATTENDEES (
+ RESOURCE_ID,
+ ATTENDEE
+);
+
+create index SCHEDULE_AUTO_REPLY_W_0256478d on SCHEDULE_AUTO_REPLY_WORK (
+ HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_AUTO_REPLY_W_0755e754 on SCHEDULE_AUTO_REPLY_WORK (
+ RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_18ce4edd on SCHEDULE_ORGANIZER_WORK (
+ HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_WO_14702035 on SCHEDULE_ORGANIZER_WORK (
+ RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_SE_9ec9f827 on SCHEDULE_ORGANIZER_SEND_WORK (
+ HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_ORGANIZER_SE_699fefc4 on SCHEDULE_ORGANIZER_SEND_WORK (
+ RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_WORK_H_745af8cf on SCHEDULE_REPLY_WORK (
+ HOME_RESOURCE_ID
+);
+
+create index SCHEDULE_REPLY_WORK_R_11bd3fbb on SCHEDULE_REPLY_WORK (
+ RESOURCE_ID
+);
+
+create index PRINCIPAL_PURGE_POLLI_6383e68a on PRINCIPAL_PURGE_POLLING_WORK (
+ JOB_ID
+);
+
+create index PRINCIPAL_PURGE_CHECK_b0c024c1 on PRINCIPAL_PURGE_CHECK_WORK (
+ JOB_ID
+);
+
+create index PRINCIPAL_PURGE_CHECK_198388a5 on PRINCIPAL_PURGE_CHECK_WORK (
+ UID
+);
+
+create index PRINCIPAL_PURGE_WORK__7a8141a3 on PRINCIPAL_PURGE_WORK (
+ JOB_ID
+);
+
+create index PRINCIPAL_PURGE_WORK__db35cfdc on PRINCIPAL_PURGE_WORK (
+ UID
+);
+
+create index PRINCIPAL_PURGE_HOME__f35eea7a on PRINCIPAL_PURGE_HOME_WORK (
+ JOB_ID
+);
+
+create index PRINCIPAL_PURGE_HOME__967e4480 on PRINCIPAL_PURGE_HOME_WORK (
+ HOME_RESOURCE_ID
+);
+
+-- Extra schema to add to current-oracle-dialect.sql
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_schemaoldpostgresdialectv51sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaoldpostgresdialectv51sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/old/postgres-dialect/v51.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1157 @@
</span><ins>+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2015 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+ HOSTNAME varchar(255) not null,
+ PID integer not null,
+ PORT integer not null,
+ TIME timestamp not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks. This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+ LOCK_NAME varchar(255) primary key
+);
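-- Editor's sketch (an assumption about usage, not part of the original file):
-- one plausible way to use NAMED_LOCK is to insert the lock name inside a
-- transaction; another transaction inserting the same name then blocks on the
-- primary-key conflict until the first one finishes. Rolling back (or deleting
-- the row before commit) keeps the table empty, as the comment above requires.
-- The lock name below is hypothetical.

begin;
insert into NAMED_LOCK (LOCK_NAME) values ('example-lock');
-- ... perform the work that must not run concurrently ...
rollback;  -- discard the row, releasing the lock and leaving the table empty
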
+
+
+--------------------
+-- Jobs --
+--------------------
+
+create sequence JOB_SEQ;
+
+create table JOB (
+ JOB_ID integer primary key default nextval('JOB_SEQ'), --implicit index
+ WORK_TYPE varchar(255) not null,
+ PRIORITY integer default 0,
+ WEIGHT integer default 0,
+ NOT_BEFORE timestamp not null,
+ ASSIGNED timestamp default null,
+ OVERDUE timestamp default null,
+ FAILED integer default 0
+);
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
+ DATAVERSION integer default 0 not null
+);
+
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
+insert into HOME_STATUS values (2, 'purging');
+
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+ RESOURCE_ID integer primary key references CALENDAR_HOME on delete cascade, -- implicit index
+ QUOTA_USED_BYTES integer default 0 not null,
+ DEFAULT_EVENTS integer default null references CALENDAR on delete set null,
+ DEFAULT_TASKS integer default null references CALENDAR on delete set null,
+ DEFAULT_POLLS integer default null references CALENDAR on delete set null,
+ ALARM_VEVENT_TIMED text default null,
+ ALARM_VEVENT_ALLDAY text default null,
+ ALARM_VTODO_TIMED text default null,
+ ALARM_VTODO_ALLDAY text default null,
+ AVAILABILITY text default null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+ CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+ CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+ CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+ RESOURCE_ID integer primary key references CALENDAR on delete cascade, -- implicit index
+ SUPPORTED_COMPONENTS varchar(255) default null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
+ DATAVERSION integer default 0 not null
+);
+
+create table NOTIFICATION (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ NOTIFICATION_HOME_RESOURCE_ID integer not null references NOTIFICATION_HOME,
+ NOTIFICATION_UID varchar(255) not null,
+ NOTIFICATION_TYPE varchar(255) not null,
+ NOTIFICATION_DATA text not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique (NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+ NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ EXTERNAL_ID integer default null,
+ CALENDAR_RESOURCE_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION integer default 0 not null,
+ MESSAGE text,
+ TRANSP integer default 0 not null, -- enum CALENDAR_TRANSP
+ ALARM_VEVENT_TIMED text default null,
+ ALARM_VEVENT_ALLDAY text default null,
+ ALARM_VTODO_TIMED text default null,
+ ALARM_VTODO_ALLDAY text default null,
+ TIMEZONE text default null,
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+ unique (CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME) -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+ CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own' );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+insert into CALENDAR_BIND_MODE values (4, 'indirect');
+insert into CALENDAR_BIND_MODE values (5, 'group');
+insert into CALENDAR_BIND_MODE values (6, 'group_read');
+insert into CALENDAR_BIND_MODE values (7, 'group_write');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+insert into CALENDAR_BIND_STATUS values (4, 'deleted');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ RESOURCE_NAME varchar(255) not null,
+ ICALENDAR_TEXT text not null,
+ ICALENDAR_UID varchar(255) not null,
+ ICALENDAR_TYPE varchar(255) not null,
+ ATTACHMENTS_MODE integer default 0 not null, -- enum CALENDAR_OBJ_ATTACHMENTS_MODE
+ DROPBOX_ID varchar(255),
+ ORGANIZER varchar(255),
+ RECURRANCE_MIN date, -- minimum date that recurrences have been expanded to.
+ RECURRANCE_MAX date, -- maximum date that recurrences have been expanded to.
+ ACCESS integer default 0 not null,
+ SCHEDULE_OBJECT boolean default false,
+ SCHEDULE_TAG varchar(36) default null,
+ SCHEDULE_ETAGS text default null,
+ PRIVATE_COMMENTS boolean default false not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ DATAVERSION integer default 0 not null,
+
+ unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+ -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+ -- calendar objects, this constraint has to be selectively enforced by the
+ -- application layer.
+
+ -- unique (CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+ CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX_MIN on
+ CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX, RECURRANCE_MIN);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+ CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+ CALENDAR_OBJECT(DROPBOX_ID);
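-- Editor's sketch (illustrative only, not part of the original file): because the
-- commented-out unique (CALENDAR_RESOURCE_ID, ICALENDAR_UID) constraint in the
-- CALENDAR_OBJECT definition above is enforced by the application instead, a
-- store might check for a clashing UID in a target (non-inbox) calendar before
-- writing, roughly as below; the CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID
-- index above makes this lookup cheap. The id and UID values are hypothetical.

select RESOURCE_ID
  from CALENDAR_OBJECT
 where CALENDAR_RESOURCE_ID = 42          -- hypothetical target calendar
   and ICALENDAR_UID = 'ABC-123@example.com';
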
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJ_ATTACHMENTS_MODE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJ_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+ ID integer primary key,
+ DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, '' );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public' );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private' );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted' );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+ INSTANCE_ID integer primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ CALENDAR_OBJECT_RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ FLOATING boolean not null,
+ START_DATE timestamp not null,
+ END_DATE timestamp not null,
+ FBTYPE integer not null,
+ TRANSPARENT boolean not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+ TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+ TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown' );
+insert into FREE_BUSY_TYPE values (1, 'free' );
+insert into FREE_BUSY_TYPE values (2, 'busy' );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative' );
+
+
+-------------------
+-- Per-user data --
+-------------------
+
+create table PERUSER (
+ TIME_RANGE_INSTANCE_ID integer not null references TIME_RANGE on delete cascade,
+ USER_ID varchar(255) not null,
+ TRANSPARENT boolean not null,
+ ADJUSTED_START_DATE timestamp default null,
+ ADJUSTED_END_DATE timestamp default null,
+
+ primary key (TIME_RANGE_INSTANCE_ID, USER_ID) -- implicit index
+);
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+ ATTACHMENT_ID integer primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ DROPBOX_ID varchar(255),
+ CONTENT_TYPE varchar(255) not null,
+ SIZE integer not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ PATH varchar(1024) not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+ ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+create index ATTACHMENT_DROPBOX_ID on
+ ATTACHMENT(DROPBOX_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+ ATTACHMENT_ID integer not null references ATTACHMENT on delete cascade,
+ MANAGED_ID varchar(255) not null,
+ CALENDAR_OBJECT_RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+
+ primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+ unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) --implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+ ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
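-- Editor's sketch (illustrative only, not part of the original file): the join
-- table above links managed attachments to the calendar objects that reference
-- them, so listing one event's attachments is a simple join; the resource id
-- below is hypothetical.

select A.ATTACHMENT_ID, A.CONTENT_TYPE, A.SIZE, ACO.MANAGED_ID
  from ATTACHMENT A
  join ATTACHMENT_CALENDAR_OBJECT ACO on ACO.ATTACHMENT_ID = A.ATTACHMENT_ID
 where ACO.CALENDAR_OBJECT_RESOURCE_ID = 42;  -- hypothetical calendar object id
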
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+ RESOURCE_ID integer not null, -- foreign key: *.RESOURCE_ID
+ NAME varchar(255) not null,
+ VALUE text not null, -- FIXME: xml?
+ VIEWER_UID varchar(255),
+
+ primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ ADDRESSBOOK_PROPERTY_STORE_ID integer default nextval('RESOURCE_ID_SEQ') not null, -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
+ DATAVERSION integer default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+ RESOURCE_ID integer primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+ QUOTA_USED_BYTES integer default 0 not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
+ OWNER_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ EXTERNAL_ID integer default null,
+ ADDRESSBOOK_RESOURCE_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION integer default 0 not null,
+ MESSAGE text, -- FIXME: xml?
+
+ primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME) -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+ SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ RESOURCE_NAME varchar(255) not null,
+ VCARD_TEXT text not null,
+ VCARD_UID varchar(255) not null,
+ KIND integer not null, -- enum ADDRESSBOOK_OBJECT_KIND
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ DATAVERSION integer default 0 not null,
+
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID) -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+----------------------------------
+-- Revisions, forward reference --
+----------------------------------
+
+create sequence REVISION_SEQ;
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+ GROUP_ID integer not null, -- references ADDRESSBOOK_OBJECT on delete cascade, -- AddressBook Object's (kind=='group') RESOURCE_ID
+ ADDRESSBOOK_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ MEMBER_ID integer not null, -- references ADDRESSBOOK_OBJECT, -- member AddressBook Object's RESOURCE_ID
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ REMOVED boolean default false not null,
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (GROUP_ID, MEMBER_ID, REVISION) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+ ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+ ABO_MEMBERS(MEMBER_ID);
+
+------------------------------------------
+-- Address Book Object Foreign Members --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+ GROUP_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade, -- AddressBook Object's (kind=='group') RESOURCE_ID
+ ADDRESSBOOK_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ MEMBER_ADDRESS varchar(255) not null, -- member AddressBook Object's 'calendar' address
+
+ primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+ ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
+ GROUP_RESOURCE_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ EXTERNAL_ID integer default null,
+ GROUP_ADDRESSBOOK_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION integer default 0 not null,
+ MESSAGE text, -- FIXME: xml?
+
+ primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME) -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+ SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+-- create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ CALENDAR_RESOURCE_ID integer references CALENDAR,
+ CALENDAR_NAME varchar(255) default null,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null,
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID, CALENDAR_NAME, RESOURCE_NAME) -- implicit index
+);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, REVISION);
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
+ OWNER_HOME_RESOURCE_ID integer references ADDRESSBOOK_HOME,
+ ADDRESSBOOK_NAME varchar(255) default null,
+ OBJECT_RESOURCE_ID integer default 0,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null,
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID, ADDRESSBOOK_NAME, RESOURCE_NAME) -- implicit index
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+ on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+ on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+ NOTIFICATION_HOME_RESOURCE_ID integer not null references NOTIFICATION_HOME on delete cascade,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null,
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique (NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+ on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+ TOKEN varchar(255) not null,
+ RESOURCE_KEY varchar(255) not null,
+ MODIFIED integer not null,
+ SUBSCRIBER_GUID varchar(255) not null,
+ USER_AGENT varchar(255) default null,
+ IP_ADDR varchar(255) default null,
+
+ primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+ on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+ TOKEN varchar(255) not null,
+ ORGANIZER varchar(255) not null,
+ ATTENDEE varchar(255) not null,
+ ICALUID varchar(255) not null,
+ ACCESSED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+ on IMIP_TOKENS(TOKEN);
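+
+-- Example (illustrative only, not part of this schema file): the index above
+-- presumably supports mapping an incoming iMIP message back to its token row.
+-- select ORGANIZER, ATTENDEE, ICALUID from IMIP_TOKENS where TOKEN = :token;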
+
+
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+--------------------------
+-- IMIP Invitation Work --
+--------------------------
+
+create table IMIP_INVITATION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ FROM_ADDR varchar(255) not null,
+ TO_ADDR varchar(255) not null,
+ ICALENDAR_TEXT text not null
+);
+
+create index IMIP_INVITATION_WORK_JOB_ID on
+ IMIP_INVITATION_WORK(JOB_ID);
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null
+);
+
+create index IMIP_POLLING_WORK_JOB_ID on
+ IMIP_POLLING_WORK(JOB_ID);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ ORGANIZER varchar(255) not null,
+ ATTENDEE varchar(255) not null,
+ ICALENDAR_TEXT text not null
+);
+
+create index IMIP_REPLY_WORK_JOB_ID on
+ IMIP_REPLY_WORK(JOB_ID);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ PUSH_ID varchar(255) not null,
+ PUSH_PRIORITY integer not null -- 1:low 5:medium 10:high
+);
+
+create index PUSH_NOTIFICATION_WORK_JOB_ID on
+ PUSH_NOTIFICATION_WORK(JOB_ID);
+create index PUSH_NOTIFICATION_WORK_PUSH_ID on
+ PUSH_NOTIFICATION_WORK(PUSH_ID);
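+
+-- Example (illustrative only, not part of this schema file): enqueueing a
+-- hypothetical medium-priority push using the 1:low 5:medium 10:high scale;
+-- :jobID and :pushID are placeholders.
+-- insert into PUSH_NOTIFICATION_WORK (JOB_ID, PUSH_ID, PUSH_PRIORITY)
+--   values (:jobID, :pushID, 5);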
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null
+);
+
+create index GROUP_CACHER_POLLING_WORK_JOB_ID on
+ GROUP_CACHER_POLLING_WORK(JOB_ID);
+
+create table GROUP_REFRESH_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ GROUP_UID varchar(255) not null
+);
+
+create index GROUP_REFRESH_WORK_JOB_ID on
+ GROUP_REFRESH_WORK(JOB_ID);
+create index GROUP_REFRESH_WORK_GROUP_UID on
+ GROUP_REFRESH_WORK(GROUP_UID);
+
+create table GROUP_DELEGATE_CHANGES_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ DELEGATOR_UID varchar(255) not null,
+ READ_DELEGATE_UID varchar(255) not null,
+ WRITE_DELEGATE_UID varchar(255) not null
+);
+
+create index GROUP_DELEGATE_CHANGES_WORK_JOB_ID on
+ GROUP_DELEGATE_CHANGES_WORK(JOB_ID);
+create index GROUP_DELEGATE_CHANGES_WORK_DELEGATOR_UID on
+ GROUP_DELEGATE_CHANGES_WORK(DELEGATOR_UID);
+
+create table GROUPS (
+ GROUP_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ NAME varchar(255) not null,
+ GROUP_UID varchar(255) not null unique,
+ MEMBERSHIP_HASH varchar(255) not null,
+ EXTANT integer default 1,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+create index GROUPS_GROUP_UID on
+ GROUPS(GROUP_UID);
+
+create table GROUP_MEMBERSHIP (
+ GROUP_ID integer not null references GROUPS on delete cascade,
+ MEMBER_UID varchar(255) not null,
+
+ primary key (GROUP_ID, MEMBER_UID)
+);
+
+create index GROUP_MEMBERSHIP_MEMBER on
+ GROUP_MEMBERSHIP(MEMBER_UID);
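+
+-- Example (illustrative only, not part of this schema file): the primary key
+-- serves group-to-member expansion and the index above the reverse lookup.
+-- select MEMBER_UID from GROUP_MEMBERSHIP where GROUP_ID = :groupID;
+-- select GROUP_ID from GROUP_MEMBERSHIP where MEMBER_UID = :memberUID;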
+
+create table GROUP_ATTENDEE_RECONCILE_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer not null references JOB,
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ GROUP_ID integer not null references GROUPS on delete cascade
+);
+
+create index GROUP_ATTENDEE_RECONCILE_WORK_JOB_ID on
+ GROUP_ATTENDEE_RECONCILE_WORK(JOB_ID);
+create index GROUP_ATTENDEE_RECONCILE_WORK_RESOURCE_ID on
+ GROUP_ATTENDEE_RECONCILE_WORK(RESOURCE_ID);
+create index GROUP_ATTENDEE_RECONCILE_WORK_GROUP_ID on
+ GROUP_ATTENDEE_RECONCILE_WORK(GROUP_ID);
+
+
+create table GROUP_ATTENDEE (
+ GROUP_ID integer not null references GROUPS on delete cascade,
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ MEMBERSHIP_HASH varchar(255) not null,
+
+ primary key (GROUP_ID, RESOURCE_ID)
+);
+
+create index GROUP_ATTENDEE_RESOURCE_ID on
+ GROUP_ATTENDEE(RESOURCE_ID);
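+
+-- Example (illustrative only, not part of this schema file): comparing the
+-- stored MEMBERSHIP_HASH with a freshly computed one is presumably how stale
+-- group attendee expansions are found for reconciliation.
+-- select RESOURCE_ID from GROUP_ATTENDEE
+--  where GROUP_ID = :groupID and MEMBERSHIP_HASH != :currentHash;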
+
+
+create table GROUP_SHAREE_RECONCILE_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer not null references JOB,
+ CALENDAR_ID integer not null references CALENDAR on delete cascade,
+ GROUP_ID integer not null references GROUPS on delete cascade
+);
+
+create index GROUP_SHAREE_RECONCILE_WORK_JOB_ID on
+ GROUP_SHAREE_RECONCILE_WORK(JOB_ID);
+create index GROUP_SHAREE_RECONCILE_WORK_CALENDAR_ID on
+ GROUP_SHAREE_RECONCILE_WORK(CALENDAR_ID);
+create index GROUP_SHAREE_RECONCILE_WORK_GROUP_ID on
+ GROUP_SHAREE_RECONCILE_WORK(GROUP_ID);
+
+
+create table GROUP_SHAREE (
+ GROUP_ID integer not null references GROUPS on delete cascade,
+ CALENDAR_ID integer not null references CALENDAR on delete cascade,
+ GROUP_BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ MEMBERSHIP_HASH varchar(255) not null,
+
+ primary key (GROUP_ID, CALENDAR_ID)
+);
+
+create index GROUP_SHAREE_CALENDAR_ID on
+ GROUP_SHAREE(CALENDAR_ID);
+
+---------------
+-- Delegates --
+---------------
+
+create table DELEGATES (
+ DELEGATOR varchar(255) not null,
+ DELEGATE varchar(255) not null,
+ READ_WRITE integer not null, -- 1 = ReadWrite, 0 = ReadOnly
+
+ primary key (DELEGATOR, READ_WRITE, DELEGATE)
+);
+create index DELEGATE_TO_DELEGATOR on
+ DELEGATES(DELEGATE, READ_WRITE, DELEGATOR);
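+
+-- Example (illustrative only, not part of this schema file): with READ_WRITE
+-- as documented above, a principal's read-write delegates can be listed with
+-- select DELEGATE from DELEGATES where DELEGATOR = :uid and READ_WRITE = 1;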
+
+create table DELEGATE_GROUPS (
+ DELEGATOR varchar(255) not null,
+ GROUP_ID integer not null references GROUPS on delete cascade,
+ READ_WRITE integer not null, -- 1 = ReadWrite, 0 = ReadOnly
+ IS_EXTERNAL integer not null, -- 1 = External, 0 = Internal
+
+ primary key (DELEGATOR, READ_WRITE, GROUP_ID)
+);
+create index DELEGATE_GROUPS_GROUP_ID on
+ DELEGATE_GROUPS(GROUP_ID);
+
+create table EXTERNAL_DELEGATE_GROUPS (
+ DELEGATOR varchar(255) primary key,
+ GROUP_UID_READ varchar(255),
+ GROUP_UID_WRITE varchar(255)
+);
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+ CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+create index CALENDAR_OBJECT_SPLITTER_WORK_JOB_ID on
+ CALENDAR_OBJECT_SPLITTER_WORK(JOB_ID);
+
+-------------------------
+-- Object Upgrade Work --
+-------------------------
+
+create table CALENDAR_OBJECT_UPGRADE_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_UPGRADE_WORK_RESOURCE_ID on
+ CALENDAR_OBJECT_UPGRADE_WORK(RESOURCE_ID);
+create index CALENDAR_OBJECT_UPGRADE_WORK_JOB_ID on
+ CALENDAR_OBJECT_UPGRADE_WORK(JOB_ID);
+
+---------------------------
+-- Revision Cleanup Work --
+---------------------------
+
+create table FIND_MIN_VALID_REVISION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null
+);
+
+create index FIND_MIN_VALID_REVISION_WORK_JOB_ID on
+ FIND_MIN_VALID_REVISION_WORK(JOB_ID);
+
+create table REVISION_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null
+);
+
+create index REVISION_CLEANUP_WORK_JOB_ID on
+ REVISION_CLEANUP_WORK(JOB_ID);
+
+------------------------
+-- Inbox Cleanup Work --
+------------------------
+
+create table INBOX_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null
+);
+
+create index INBOX_CLEANUP_WORK_JOB_ID on
+ INBOX_CLEANUP_WORK(JOB_ID);
+
+create table CLEANUP_ONE_INBOX_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ HOME_ID integer not null unique references CALENDAR_HOME on delete cascade -- implicit index
+);
+
+create index CLEANUP_ONE_INBOX_WORK_JOB_ID on
+ CLEANUP_ONE_INBOX_WORK(JOB_ID);
+
+-------------------
+-- Schedule Work --
+-------------------
+
+create table SCHEDULE_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ ICALENDAR_UID varchar(255) not null,
+ WORK_TYPE varchar(255) not null
+);
+
+create index SCHEDULE_WORK_JOB_ID on
+ SCHEDULE_WORK(JOB_ID);
+create index SCHEDULE_WORK_ICALENDAR_UID on
+ SCHEDULE_WORK(ICALENDAR_UID);
+
+---------------------------
+-- Schedule Refresh Work --
+---------------------------
+
+create table SCHEDULE_REFRESH_WORK (
+ WORK_ID integer primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+ HOME_RESOURCE_ID integer not null references CALENDAR_HOME on delete cascade,
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ ATTENDEE_COUNT integer
+);
+
+create index SCHEDULE_REFRESH_WORK_HOME_RESOURCE_ID on
+ SCHEDULE_REFRESH_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REFRESH_WORK_RESOURCE_ID on
+ SCHEDULE_REFRESH_WORK(RESOURCE_ID);
+
+create table SCHEDULE_REFRESH_ATTENDEES (
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ ATTENDEE varchar(255) not null,
+
+ primary key (RESOURCE_ID, ATTENDEE)
+);
+
+create index SCHEDULE_REFRESH_ATTENDEES_RESOURCE_ID_ATTENDEE on
+ SCHEDULE_REFRESH_ATTENDEES(RESOURCE_ID, ATTENDEE);
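+
+-- Example (illustrative only, not part of this schema file): collecting the
+-- attendees queued for a batched refresh of a single calendar object.
+-- select ATTENDEE from SCHEDULE_REFRESH_ATTENDEES where RESOURCE_ID = :resourceID;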
+
+------------------------------
+-- Schedule Auto Reply Work --
+------------------------------
+
+create table SCHEDULE_AUTO_REPLY_WORK (
+ WORK_ID integer primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+ HOME_RESOURCE_ID integer not null references CALENDAR_HOME on delete cascade,
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ PARTSTAT varchar(255) not null
+);
+
+create index SCHEDULE_AUTO_REPLY_WORK_HOME_RESOURCE_ID on
+ SCHEDULE_AUTO_REPLY_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_AUTO_REPLY_WORK_RESOURCE_ID on
+ SCHEDULE_AUTO_REPLY_WORK(RESOURCE_ID);
+
+-----------------------------
+-- Schedule Organizer Work --
+-----------------------------
+
+create table SCHEDULE_ORGANIZER_WORK (
+ WORK_ID integer primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+ SCHEDULE_ACTION integer not null, -- Enum SCHEDULE_ACTION
+ HOME_RESOURCE_ID integer not null references CALENDAR_HOME on delete cascade,
+ RESOURCE_ID integer, -- this references a possibly non-existent CALENDAR_OBJECT
+ ICALENDAR_TEXT_OLD text,
+ ICALENDAR_TEXT_NEW text,
+ ATTENDEE_COUNT integer,
+ SMART_MERGE boolean
+);
+
+create index SCHEDULE_ORGANIZER_WORK_HOME_RESOURCE_ID on
+ SCHEDULE_ORGANIZER_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_ORGANIZER_WORK_RESOURCE_ID on
+ SCHEDULE_ORGANIZER_WORK(RESOURCE_ID);
+
+-- Enumeration of schedule actions
+
+create table SCHEDULE_ACTION (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into SCHEDULE_ACTION values (0, 'create');
+insert into SCHEDULE_ACTION values (1, 'modify');
+insert into SCHEDULE_ACTION values (2, 'modify-cancelled');
+insert into SCHEDULE_ACTION values (3, 'remove');
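+
+-- Example (illustrative only, not part of this schema file): the work tables
+-- above store one of these IDs in their SCHEDULE_ACTION column; it can be
+-- decoded with
+-- select DESCRIPTION from SCHEDULE_ACTION where ID = :scheduleAction;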
+
+----------------------------------
+-- Schedule Organizer Send Work --
+----------------------------------
+
+create table SCHEDULE_ORGANIZER_SEND_WORK (
+ WORK_ID integer primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+ SCHEDULE_ACTION integer not null, -- Enum SCHEDULE_ACTION
+ HOME_RESOURCE_ID integer not null references CALENDAR_HOME on delete cascade,
+ RESOURCE_ID integer, -- this references a possibly non-existent CALENDAR_OBJECT
+ ATTENDEE varchar(255) not null,
+ ITIP_MSG text,
+ NO_REFRESH boolean
+);
+
+create index SCHEDULE_ORGANIZER_SEND_WORK_HOME_RESOURCE_ID on
+ SCHEDULE_ORGANIZER_SEND_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_ORGANIZER_SEND_WORK_RESOURCE_ID on
+ SCHEDULE_ORGANIZER_SEND_WORK(RESOURCE_ID);
+
+-------------------------
+-- Schedule Reply Work --
+-------------------------
+
+create table SCHEDULE_REPLY_WORK (
+ WORK_ID integer primary key references SCHEDULE_WORK on delete cascade, -- implicit index
+ HOME_RESOURCE_ID integer not null references CALENDAR_HOME on delete cascade,
+ RESOURCE_ID integer, -- this references a possibly non-existent CALENDAR_OBJECT
+ ITIP_MSG text
+);
+
+create index SCHEDULE_REPLY_WORK_HOME_RESOURCE_ID on
+ SCHEDULE_REPLY_WORK(HOME_RESOURCE_ID);
+create index SCHEDULE_REPLY_WORK_RESOURCE_ID on
+ SCHEDULE_REPLY_WORK(RESOURCE_ID);
+
+----------------------------------
+-- Principal Purge Polling Work --
+----------------------------------
+
+create table PRINCIPAL_PURGE_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null
+);
+
+create index PRINCIPAL_PURGE_POLLING_WORK_JOB_ID on
+ PRINCIPAL_PURGE_POLLING_WORK(JOB_ID);
+
+--------------------------------
+-- Principal Purge Check Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_CHECK_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ UID varchar(255) not null
+);
+
+create index PRINCIPAL_PURGE_CHECK_WORK_JOB_ID on
+ PRINCIPAL_PURGE_CHECK_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_CHECK_WORK_UID on
+ PRINCIPAL_PURGE_CHECK_WORK(UID);
+
+--------------------------
+-- Principal Purge Work --
+--------------------------
+
+create table PRINCIPAL_PURGE_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ UID varchar(255) not null
+);
+
+create index PRINCIPAL_PURGE_WORK_JOB_ID on
+ PRINCIPAL_PURGE_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_WORK_UID on
+ PRINCIPAL_PURGE_WORK(UID);
+
+
+--------------------------------
+-- Principal Home Remove Work --
+--------------------------------
+
+create table PRINCIPAL_PURGE_HOME_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ HOME_RESOURCE_ID integer not null references CALENDAR_HOME on delete cascade
+);
+
+create index PRINCIPAL_PURGE_HOME_WORK_JOB_ID on
+ PRINCIPAL_PURGE_HOME_WORK(JOB_ID);
+create index PRINCIPAL_PURGE_HOME_HOME_RESOURCE_ID on
+ PRINCIPAL_PURGE_HOME_WORK(HOME_RESOURCE_ID);
+
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+ NAME varchar(255) primary key, -- implicit index
+ VALUE varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '51');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '6');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
+insert into CALENDARSERVER values ('MIN-VALID-REVISION', '1');
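+
+-- Example (illustrative only, not part of this schema file): these values are
+-- presumably consulted at startup and upgrade time, e.g.
+-- select VALUE from CALENDARSERVER where NAME = 'VERSION';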
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_51_to_52sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaupgradesoracledialectupgrade_from_51_to_52sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_51_to_52.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,102 @@
</span><ins>+----
+-- Copyright (c) 2012-2015 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 51 to 52 --
+---------------------------------------------------
+
+-- New status values
+insert into HOME_STATUS (DESCRIPTION, ID) values ('migrating', 3);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('disabled', 4);
+
+-- Home constraints
+alter table CALENDAR_HOME
+        drop unique (OWNER_UID);
+alter table CALENDAR_HOME
+        add unique (OWNER_UID, STATUS);
+
+alter table ADDRESSBOOK_HOME
+        drop unique (OWNER_UID);
+alter table ADDRESSBOOK_HOME
+        add unique (OWNER_UID, STATUS);
+
+alter table NOTIFICATION_HOME
+        drop unique (OWNER_UID);
+alter table NOTIFICATION_HOME
+        add unique (OWNER_UID, STATUS);
+
+-- Change columns
+alter table CALENDAR_BIND
+        drop column EXTERNAL_ID
+        add ("BIND_UID" nvarchar2(36) default null);
+
+alter table SHARED_ADDRESSBOOK_BIND
+        drop column EXTERNAL_ID
+        add ("BIND_UID" nvarchar2(36) default null);
+
+alter table SHARED_GROUP_BIND
+        drop column EXTERNAL_ID
+        add ("BIND_UID" nvarchar2(36) default null);
+
+
+-- New table
+create table CALENDAR_MIGRATION (
+ "CALENDAR_HOME_RESOURCE_ID" integer references CALENDAR_HOME on delete cascade,
+ "REMOTE_RESOURCE_ID" integer not null,
+ "LOCAL_RESOURCE_ID" integer references CALENDAR on delete cascade,
+ "LAST_SYNC_TOKEN" nvarchar2(255),
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "REMOTE_RESOURCE_ID")
+);
+
+create index CALENDAR_MIGRATION_LO_0525c72b on CALENDAR_MIGRATION (
+ LOCAL_RESOURCE_ID
+);
+
+-- New table
+create table CALENDAR_OBJECT_MIGRATION (
+ "CALENDAR_HOME_RESOURCE_ID" integer references CALENDAR_HOME on delete cascade,
+ "REMOTE_RESOURCE_ID" integer not null,
+ "LOCAL_RESOURCE_ID" integer references CALENDAR_OBJECT on delete cascade,
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "REMOTE_RESOURCE_ID")
+);
+
+create index CALENDAR_OBJECT_MIGRA_0502cbef on CALENDAR_OBJECT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID,
+ LOCAL_RESOURCE_ID
+);
+create index CALENDAR_OBJECT_MIGRA_3577efd9 on CALENDAR_OBJECT_MIGRATION (
+ LOCAL_RESOURCE_ID
+);
+
+-- New table
+create table ATTACHMENT_MIGRATION (
+ "CALENDAR_HOME_RESOURCE_ID" integer references CALENDAR_HOME on delete cascade,
+ "REMOTE_RESOURCE_ID" integer not null,
+ "LOCAL_RESOURCE_ID" integer references ATTACHMENT on delete cascade,
+ primary key ("CALENDAR_HOME_RESOURCE_ID", "REMOTE_RESOURCE_ID")
+);
+
+create index ATTACHMENT_MIGRATION__804bf85e on ATTACHMENT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID,
+ LOCAL_RESOURCE_ID
+);
+create index ATTACHMENT_MIGRATION__816947fe on ATTACHMENT_MIGRATION (
+ LOCAL_RESOURCE_ID
+);
+
+
+-- update the version
+update CALENDARSERVER set VALUE = '52' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_51_to_52sqlfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_schemaupgradespostgresdialectupgrade_from_51_to_52sql"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_51_to_52.sql        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,97 @@
</span><ins>+----
+-- Copyright (c) 2012-2015 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 51 to 52 --
+---------------------------------------------------
+
+-- New status values
+insert into HOME_STATUS values (3, 'migrating');
+insert into HOME_STATUS values (4, 'disabled');
+
+-- Home constraints
+alter table CALENDAR_HOME
+        drop constraint CALENDAR_HOME_OWNER_UID_KEY,
+        add unique (OWNER_UID, STATUS);
+
+alter table ADDRESSBOOK_HOME
+        drop constraint ADDRESSBOOK_HOME_OWNER_UID_KEY,
+        add unique (OWNER_UID, STATUS);
+
+alter table NOTIFICATION_HOME
+        drop constraint NOTIFICATION_HOME_OWNER_UID_KEY,
+        add unique (OWNER_UID, STATUS);
+
+-- Change columns
+alter table CALENDAR_BIND
+        drop column EXTERNAL_ID,
+        add column BIND_UID varchar(36) default null;
+
+alter table SHARED_ADDRESSBOOK_BIND
+        drop column EXTERNAL_ID,
+        add column BIND_UID varchar(36) default null;
+
+alter table SHARED_GROUP_BIND
+        drop column EXTERNAL_ID,
+        add column BIND_UID varchar(36) default null;
+
+
+-- New table
+create table CALENDAR_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID integer references CALENDAR_HOME on delete cascade,
+ REMOTE_RESOURCE_ID integer not null,
+ LOCAL_RESOURCE_ID integer references CALENDAR on delete cascade,
+ LAST_SYNC_TOKEN varchar(255),
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_MIGRATION_LOCAL_RESOURCE_ID on
+ CALENDAR_MIGRATION(LOCAL_RESOURCE_ID);
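+
+-- Example (illustrative only, not part of this upgrade script): during
+-- pod-to-pod migration a row here presumably maps a remote calendar id to its
+-- local copy, e.g.
+-- select LOCAL_RESOURCE_ID, LAST_SYNC_TOKEN from CALENDAR_MIGRATION
+--  where CALENDAR_HOME_RESOURCE_ID = :homeID and REMOTE_RESOURCE_ID = :remoteID;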
+
+
+-- New table
+create table CALENDAR_OBJECT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID integer references CALENDAR_HOME on delete cascade,
+ REMOTE_RESOURCE_ID integer not null,
+ LOCAL_RESOURCE_ID integer references CALENDAR_OBJECT on delete cascade,
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index CALENDAR_OBJECT_MIGRATION_HOME_LOCAL on
+ CALENDAR_OBJECT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index CALENDAR_OBJECT_MIGRATION_LOCAL_RESOURCE_ID on
+ CALENDAR_OBJECT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
+-- New table
+create table ATTACHMENT_MIGRATION (
+ CALENDAR_HOME_RESOURCE_ID integer references CALENDAR_HOME on delete cascade,
+ REMOTE_RESOURCE_ID integer not null,
+ LOCAL_RESOURCE_ID integer references ATTACHMENT on delete cascade,
+
+ primary key (CALENDAR_HOME_RESOURCE_ID, REMOTE_RESOURCE_ID) -- implicit index
+);
+
+create index ATTACHMENT_MIGRATION_HOME_LOCAL on
+ ATTACHMENT_MIGRATION(CALENDAR_HOME_RESOURCE_ID, LOCAL_RESOURCE_ID);
+create index ATTACHMENT_MIGRATION_LOCAL_RESOURCE_ID on
+ ATTACHMENT_MIGRATION(LOCAL_RESOURCE_ID);
+
+
+-- update the version
+update CALENDARSERVER set VALUE = '52' where NAME = 'VERSION';
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_sharingpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_sharingpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_sharing.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_sharing.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_sharing.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_sharing.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,1470 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from collections import namedtuple
+from pycalendar.datetime import DateTime
+
+from twext.enterprise.dal.syntax import Insert, Parameter, Update, Delete, \
+ Select, Max
+from twext.python.clsprop import classproperty
+from twext.python.log import Logger
+
+from twisted.internet.defer import inlineCallbacks, returnValue, succeed
+
+from txdav.base.propertystore.base import PropertyName
+from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, _BIND_MODE_DIRECT, \
+ _BIND_MODE_INDIRECT, _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, \
+ _BIND_STATUS_INVITED, _BIND_STATUS_INVALID, _BIND_STATUS_DELETED, \
+ _HOME_STATUS_EXTERNAL
+from txdav.common.icommondatastore import ExternalShareFailed, \
+ HomeChildNameAlreadyExistsError, AllRetriesFailed
+from txdav.xml import element
+
+from uuid import uuid4
+
+
+log = Logger()
+
+"""
+Classes and methods that relate to sharing in the SQL store.
+"""
+
+class SharingHomeMixIn(object):
+ """
+ Common class for CommonHome to implement sharing operations
+ """
+
+ @inlineCallbacks
+ def acceptShare(self, shareUID, summary=None):
+ """
+ This share is being accepted.
+ """
+
+ shareeView = yield self.anyObjectWithShareUID(shareUID)
+ if shareeView is not None:
+ yield shareeView.acceptShare(summary)
+
+ returnValue(shareeView)
+
+
+ @inlineCallbacks
+ def declineShare(self, shareUID):
+ """
+ This share is being declined.
+ """
+
+ shareeView = yield self.anyObjectWithShareUID(shareUID)
+ if shareeView is not None:
+ yield shareeView.declineShare()
+
+ returnValue(shareeView is not None)
+
+
+ #
+ # External (cross-pod) sharing - entry point is the sharee's home collection.
+ #
+ @inlineCallbacks
+ def processExternalInvite(
+ self, ownerUID, ownerName, shareUID, bindMode, bindUID, summary,
+ copy_invite_properties, supported_components=None
+ ):
+ """
+ External invite received.
+ """
+
+ # Get the owner home - create external one if not present
+ ownerHome = yield self._txn.homeWithUID(
+ self._homeType, ownerUID, status=_HOME_STATUS_EXTERNAL, create=True
+ )
+ if ownerHome is None or not ownerHome.external():
+ raise ExternalShareFailed("Invalid owner UID: {}".format(ownerUID))
+
+ # Try to find owner calendar via its external id
+ ownerView = yield ownerHome.childWithBindUID(bindUID)
+ if ownerView is None:
+ ownerView = yield ownerHome.createCollectionForExternalShare(ownerName, bindUID, supported_components)
+
+ # Now carry out the share operation
+ if bindMode == _BIND_MODE_DIRECT:
+ shareeView = yield ownerView.directShareWithUser(
+ self.uid(), shareName=shareUID
+ )
+ else:
+ shareeView = yield ownerView.inviteUIDToShare(
+ self.uid(), bindMode, summary, shareName=shareUID
+ )
+
+ shareeView.setInviteCopyProperties(copy_invite_properties)
+
+
+ @inlineCallbacks
+ def processExternalUninvite(self, ownerUID, bindUID, shareUID):
+ """
+ External uninvite received.
+ """
+
+ # Get the owner home
+ ownerHome = yield self._txn.homeWithUID(self._homeType, ownerUID, status=_HOME_STATUS_EXTERNAL)
+ if ownerHome is None or not ownerHome.external():
+ raise ExternalShareFailed("Invalid owner UID: {}".format(ownerUID))
+
+ # Try to find owner calendar via its external id
+ ownerView = yield ownerHome.childWithBindUID(bindUID)
+ if ownerView is None:
+ raise ExternalShareFailed("Invalid share ID: {}".format(shareUID))
+
+ # Now carry out the share operation
+ yield ownerView.uninviteUIDFromShare(self.uid())
+
+ # See if there are any references to the external share. If not,
+ # remove it
+ invites = yield ownerView.sharingInvites()
+ if len(invites) == 0:
+ yield ownerHome.removeExternalChild(ownerView)
+
+
+ @inlineCallbacks
+ def processExternalReply(
+ self, ownerUID, shareeUID, shareUID, bindStatus, summary=None
+ ):
+ """
+ External share reply received.
+ """
+
+ # Make sure the shareeUID and shareUID match
+
+ # Get the owner home - create external one if not present
+ shareeHome = yield self._txn.homeWithUID(self._homeType, shareeUID, status=_HOME_STATUS_EXTERNAL)
+ if shareeHome is None or not shareeHome.external():
+ raise ExternalShareFailed(
+ "Invalid sharee UID: {}".format(shareeUID)
+ )
+
+ # Try to find owner calendar via its external id
+ shareeView = yield shareeHome.anyObjectWithShareUID(shareUID)
+ if shareeView is None:
+ raise ExternalShareFailed("Invalid share UID: {}".format(shareUID))
+
+ # Now carry out the share operation
+ if bindStatus == _BIND_STATUS_ACCEPTED:
+ yield shareeHome.acceptShare(shareUID, summary)
+ elif bindStatus == _BIND_STATUS_DECLINED:
+ if shareeView.direct():
+ yield shareeView.deleteShare()
+ else:
+ yield shareeHome.declineShare(shareUID)
+
+
+ @inlineCallbacks
+ def createCollectionForExternalShare(self, name, bindUID, supported_components):
+ """
+ Create the L{CommonHomeChild} object that is used as a "stub" to represent the external
+ object on the other pod for the sharer.
+
+ @param name: name of the collection
+ @type name: L{str}
+ @param bindUID: id on other pod
+ @type bindUID: L{str}
+ @param supported_components: optional set of supported components
+ @type supported_components: L{str}
+ """
+ try:
+ ownerView = yield self.createChildWithName(
+ name, bindUID=bindUID
+ )
+ except HomeChildNameAlreadyExistsError:
+ # This is odd - it means we possibly have a left over sharer
+ # collection which the sharer likely removed and re-created
+ # with the same name but now it has a different bindUID and
+ # is not found by the initial query. What we do is check to see
+ # whether any shares still reference the old ID - if they do we
+ # are hosed. If not, we can remove the old item and create a new one.
+ oldOwnerView = yield self.childWithName(name)
+ invites = yield oldOwnerView.sharingInvites()
+ if len(invites) != 0:
+ log.error(
+ "External invite collection name is present with a "
+ "different bindUID and still has shares"
+ )
+ raise
+ log.error(
+ "External invite collection name is present with a "
+ "different bindUID - trying to fix"
+ )
+ yield self.removeExternalChild(oldOwnerView)
+ ownerView = yield self.createChildWithName(
+ name, bindUID=bindUID
+ )
+
+ if (
+ supported_components is not None and
+ hasattr(ownerView, "setSupportedComponents")
+ ):
+ yield ownerView.setSupportedComponents(supported_components)
+
+ returnValue(ownerView)
+
+
+ @inlineCallbacks
+ def sharedToBindRecords(self):
+ """
+ Return a L{dict} that maps home/directory uid to a sharing bind record for collections shared to this user.
+ """
+
+ # Get shared to bind records
+ records = yield self._childClass._bindRecordClass.query(
+ self._txn,
+ (getattr(self._childClass._bindRecordClass, self._childClass._bindHomeIDAttributeName) == self.id()).And(
+ self._childClass._bindRecordClass.bindMode != _BIND_MODE_OWN
+ )
+ )
+ records = dict([(getattr(record, self._childClass._bindResourceIDAttributeName), record) for record in records])
+ if not records:
+ returnValue({})
+
+ # Look up the owner records for each of the shared to records
+ ownerRecords = yield self._childClass._bindRecordClass.query(
+ self._txn,
+ (getattr(self._childClass._bindRecordClass, self._childClass._bindResourceIDAttributeName).In(records.keys())).And(
+ self._childClass._bindRecordClass.bindMode == _BIND_MODE_OWN
+ )
+ )
+
+ # Important: this method is called when migrating shared-to records to some other pod. For that to work, all the
+ # owner records must have a bindUID assigned to them. Normally bindUIDs are assigned the first time an external
+ # share is created, but migration creates the external share implicitly, so assign any missing bindUIDs here.
+ for ownerRecord in ownerRecords:
+ if not ownerRecord.bindUID:
+ yield ownerRecord.update(bindUID=str(uuid4()))
+
+ ownerRecords = dict([(getattr(record, self._childClass._bindResourceIDAttributeName), record) for record in ownerRecords])
+
+ # Look up the metadata records for each of the shared to records
+ metadataRecords = yield self._childClass._metadataRecordClass.query(
+ self._txn,
+ self._childClass._metadataRecordClass.resourceID.In(records.keys()),
+ )
+ metadataRecords = dict([(record.resourceID, record) for record in metadataRecords])
+
+ # Map the owner records to home ownerUIDs
+ homeIDs = dict([(
+ getattr(record, self._childClass._bindHomeIDAttributeName), getattr(record, self._childClass._bindResourceIDAttributeName)
+ ) for record in ownerRecords.values()])
+ homes = yield self._childClass._homeRecordClass.query(
+ self._txn,
+ self._childClass._homeRecordClass.resourceID.In(homeIDs.keys()),
+ )
+ homeMap = dict((homeIDs[home.resourceID], home.ownerUID,) for home in homes)
+
+ returnValue(dict([(homeMap[calendarID], (records[calendarID], ownerRecords[calendarID], metadataRecords[calendarID],),) for calendarID in records]))
+
+
+
+SharingInvitation = namedtuple(
+ "SharingInvitation",
+ ["uid", "ownerUID", "ownerHomeID", "shareeUID", "shareeHomeID", "mode", "status", "summary"]
+)
+
+
+
+class SharingMixIn(object):
+ """
+ Common class for CommonHomeChild and AddressBookObject
+ """
+
+ @classproperty
+ def _bindInsertQuery(cls, **kw):
+ """
+ DAL statement to create a bind entry that connects a collection to its
+ home.
+ """
+ bind = cls._bindSchema
+ return Insert({
+ bind.HOME_RESOURCE_ID: Parameter("homeID"),
+ bind.RESOURCE_ID: Parameter("resourceID"),
+ bind.RESOURCE_NAME: Parameter("name"),
+ bind.BIND_MODE: Parameter("mode"),
+ bind.BIND_STATUS: Parameter("bindStatus"),
+ bind.BIND_UID: Parameter("bindUID"),
+ bind.MESSAGE: Parameter("message"),
+ })
+
+
+ @classmethod
+ def _updateBindColumnsQuery(cls, columnMap):
+ bind = cls._bindSchema
+ return Update(
+ columnMap,
+ Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
+ bind.HOME_RESOURCE_ID == Parameter("homeID")),
+ )
+
+
+ @classproperty
+ def _deleteBindForResourceIDAndHomeID(cls):
+ bind = cls._bindSchema
+ return Delete(
+ From=bind,
+ Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
+ bind.HOME_RESOURCE_ID == Parameter("homeID")),
+ )
+
+
+ @classmethod
+ def _bindFor(cls, condition):
+ bind = cls._bindSchema
+ columns = cls.bindColumns() + cls.additionalBindColumns()
+ return Select(
+ columns,
+ From=bind,
+ Where=condition
+ )
+
+
+ @classmethod
+ def _bindInviteFor(cls, condition):
+ home = cls._homeSchema
+ bind = cls._bindSchema
+ return Select(
+ [
+ home.OWNER_UID,
+ bind.HOME_RESOURCE_ID,
+ bind.RESOURCE_ID,
+ bind.RESOURCE_NAME,
+ bind.BIND_MODE,
+ bind.BIND_STATUS,
+ bind.MESSAGE,
+ ],
+ From=bind.join(home, on=(bind.HOME_RESOURCE_ID == home.RESOURCE_ID)),
+ Where=condition
+ )
+
+
+ @classproperty
+ def _sharedInvitationBindForResourceID(cls):
+ bind = cls._bindSchema
+ return cls._bindInviteFor(
+ (bind.RESOURCE_ID == Parameter("resourceID")).And
+ (bind.BIND_MODE != _BIND_MODE_OWN)
+ )
+
+
+ @classproperty
+ def _acceptedBindForHomeID(cls):
+ bind = cls._bindSchema
+ return cls._bindFor((bind.HOME_RESOURCE_ID == Parameter("homeID"))
+ .And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED))
+
+
+ @classproperty
+ def _bindForResourceIDAndHomeID(cls):
+ """
+ DAL query that looks up home bind rows by home child
+ resource ID and home resource ID.
+ """
+ bind = cls._bindSchema
+ return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
+ .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
+
+
+ @classproperty
+ def _bindForBindUIDAndHomeID(cls):
+ """
+ DAL query that looks up home bind rows by bind UID
+ and home resource ID.
+ """
+ bind = cls._bindSchema
+ return cls._bindFor((bind.BIND_UID == Parameter("bindUID"))
+ .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
+
+
+ @classproperty
+ def _bindForNameAndHomeID(cls):
+ """
+ DAL query that looks up any bind rows by resource name
+ and home resource ID.
+ """
+ bind = cls._bindSchema
+ return cls._bindFor((bind.RESOURCE_NAME == Parameter("name"))
+ .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
+
+
+ #
+ # Higher level API
+ #
+ @inlineCallbacks
+ def inviteUIDToShare(self, shareeUID, mode, summary=None, shareName=None):
+ """
+ Invite a user to share this collection - either create the share if it does not exist, or
+ update the existing share with new values. Make sure a notification is sent as well.
+
+ @param shareeUID: UID of the sharee
+ @type shareeUID: C{str}
+ @param mode: access mode
+ @type mode: C{int}
+ @param summary: share message
+ @type summary: C{str}
+ """
+
+ # Look for existing invite and update its fields or create new one
+ shareeView = yield self.shareeView(shareeUID)
+ if shareeView is not None:
+ status = _BIND_STATUS_INVITED if shareeView.shareStatus() in (_BIND_STATUS_DECLINED, _BIND_STATUS_INVALID) else None
+ yield self.updateShare(shareeView, mode=mode, status=status, summary=summary)
+ else:
+ shareeView = yield self.createShare(shareeUID=shareeUID, mode=mode, summary=summary, shareName=shareName)
+
+ # Check for external
+ if shareeView.viewerHome().external():
+ yield self._sendExternalInvite(shareeView)
+ else:
+ # Send invite notification
+ yield self._sendInviteNotification(shareeView)
+ returnValue(shareeView)
+
+
+ @inlineCallbacks
+ def directShareWithUser(self, shareeUID, shareName=None):
+ """
+ Create a direct share with the specified user. Note it is currently up to the app layer
+ to enforce access control - this is not ideal as we really should have control of that in
+ the store. Once we do, this api will need to verify that access is allowed for a direct share.
+
+ NB no invitations are used with direct sharing.
+
+ @param shareeUID: UID of the sharee
+ @type shareeUID: C{str}
+ """
+
+ # Ignore if it already exists
+ shareeView = yield self.shareeView(shareeUID)
+ if shareeView is None:
+ shareeView = yield self.createShare(shareeUID=shareeUID, mode=_BIND_MODE_DIRECT, shareName=shareName)
+ yield shareeView.newShare()
+
+ # Check for external
+ if shareeView.viewerHome().external():
+ yield self._sendExternalInvite(shareeView)
+
+ returnValue(shareeView)
+
+
+ @inlineCallbacks
+ def uninviteUIDFromShare(self, shareeUID):
+ """
+ Remove a user from a share. Make sure a notification is sent as well.
+
+ @param shareeUID: UID of the sharee
+ @type shareeUID: C{str}
+ """
+ # Cancel invites - we'll just use whatever userid we are given
+
+ shareeView = yield self.shareeView(shareeUID)
+ if shareeView is not None:
+ if shareeView.viewerHome().external():
+ yield self._sendExternalUninvite(shareeView)
+ else:
+ # If current user state is accepted then we send an invite with the new state, otherwise
+ # we cancel any existing invites for the user
+ if not shareeView.direct():
+ if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
+ yield self._removeInviteNotification(shareeView)
+ else:
+ yield self._sendInviteNotification(shareeView, notificationState=_BIND_STATUS_DELETED)
+
+ # Remove the bind
+ yield self.removeShare(shareeView)
+
+
+ @inlineCallbacks
+ def acceptShare(self, summary=None):
+ """
+ This share is being accepted.
+ """
+
+ if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
+ if self.external():
+ yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
+ ownerView = yield self.ownerView()
+ yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
+ yield self.newShare(displayname=summary)
+ if not ownerView.external():
+ yield self._sendReplyNotification(ownerView, summary)
+
+
+ @inlineCallbacks
+ def declineShare(self):
+ """
+ This share is being declined.
+ """
+
+ if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
+ if self.external():
+ yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
+ ownerView = yield self.ownerView()
+ yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
+ if not ownerView.external():
+ yield self._sendReplyNotification(ownerView)
+
+
+ @inlineCallbacks
+ def deleteShare(self):
+ """
+ This share is being deleted (by the sharee) - either decline or remove (for direct shares).
+ """
+
+ ownerView = yield self.ownerView()
+ if self.direct():
+ yield ownerView.removeShare(self)
+ if ownerView.external():
+ yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
+ else:
+ yield self.declineShare()
+
+
+ @inlineCallbacks
+ def ownerDeleteShare(self):
+ """
+ This share is being deleted (by the owner) - either decline or remove (for direct shares).
+ """
+
+ # Change status on store object
+ yield self.setShared(False)
+
+ # Remove all sharees (direct and invited)
+ for invitation in (yield self.sharingInvites()):
+ yield self.uninviteUIDFromShare(invitation.shareeUID)
+
+
+ def newShare(self, displayname=None):
+ """
+ Override in derived classes to do any specific operations needed when a share
+ is first accepted.
+ """
+ return succeed(None)
+
+
+ @inlineCallbacks
+ def allInvitations(self):
+ """
+ Get list of all invitations (non-direct) to this object.
+ """
+ invitations = yield self.sharingInvites()
+
+ # remove direct shares as those are not "real" invitations
+ invitations = filter(lambda x: x.mode != _BIND_MODE_DIRECT, invitations)
+ invitations.sort(key=lambda invitation: invitation.shareeUID)
+ returnValue(invitations)
+
+
+ @inlineCallbacks
+ def _sendInviteNotification(self, shareeView, notificationState=None):
+ """
+ Called on the owner's resource.
+ """
+ # When deleting, the message is the sharee's display name
+ displayname = shareeView.shareMessage()
+ if notificationState == _BIND_STATUS_DELETED:
+ displayname = str(shareeView.properties().get(PropertyName.fromElement(element.DisplayName), displayname))
+
+ notificationtype = {
+ "notification-type": "invite-notification",
+ "shared-type": shareeView.sharedResourceType(),
+ }
+ notificationdata = {
+ "notification-type": "invite-notification",
+ "shared-type": shareeView.sharedResourceType(),
+ "dtstamp": DateTime.getNowUTC().getText(),
+ "owner": shareeView.ownerHome().uid(),
+ "sharee": shareeView.viewerHome().uid(),
+ "uid": shareeView.shareUID(),
+ "status": shareeView.shareStatus() if notificationState is None else notificationState,
+ "access": (yield shareeView.effectiveShareMode()),
+ "ownerName": self.shareName(),
+ "summary": displayname,
+ }
+ if hasattr(self, "getSupportedComponents"):
+ notificationdata["supported-components"] = self.getSupportedComponents()
+
+ # Add to sharee's collection
+ notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid(), create=True)
+ yield notifications.writeNotificationObject(shareeView.shareUID(), notificationtype, notificationdata)
+
+
+ @inlineCallbacks
+ def _sendReplyNotification(self, ownerView, summary=None):
+ """
+ Create a reply notification based on the current state of this shared resource.
+ """
+
+ # Generate invite XML
+ notificationUID = "%s-reply" % (self.shareUID(),)
+
+ notificationtype = {
+ "notification-type": "invite-reply",
+ "shared-type": self.sharedResourceType(),
+ }
+
+ notificationdata = {
+ "notification-type": "invite-reply",
+ "shared-type": self.sharedResourceType(),
+ "dtstamp": DateTime.getNowUTC().getText(),
+ "owner": self.ownerHome().uid(),
+ "sharee": self.viewerHome().uid(),
+ "status": self.shareStatus(),
+ "ownerName": ownerView.shareName(),
+ "in-reply-to": self.shareUID(),
+ "summary": summary,
+ }
+
+ # Add to owner notification collection
+ notifications = yield self._txn.notificationsWithUID(self.ownerHome().uid(), create=True)
+ yield notifications.writeNotificationObject(notificationUID, notificationtype, notificationdata)
+
+
+ @inlineCallbacks
+ def _removeInviteNotification(self, shareeView):
+ """
+ Called on the owner's resource.
+ """
+
+ # Remove from sharee's collection
+ notifications = yield self._txn.notificationsWithUID(shareeView.viewerHome().uid())
+ yield notifications.removeNotificationObjectWithUID(shareeView.shareUID())
+
+
+ #
+ # External/cross-pod API
+ #
+ @inlineCallbacks
+ def _sendExternalInvite(self, shareeView):
+
+ # Must make sure this collection has a BIND_UID assigned
+ if not self._bindUID:
+ self._bindUID = str(uuid4())
+ yield self._updateBindColumnsQuery({self._bindSchema.BIND_UID: self._bindUID}).on(
+ self._txn,
+ resourceID=self.id(), homeID=self.ownerHome().id()
+ )
+
+ # Now send the invite
+ yield self._txn.store().conduit.send_shareinvite(
+ self._txn,
+ shareeView.ownerHome()._homeType,
+ shareeView.ownerHome().uid(),
+ self.shareName(),
+ shareeView.viewerHome().uid(),
+ shareeView.shareUID(),
+ shareeView.shareMode(),
+ self.bindUID(),
+ shareeView.shareMessage(),
+ self.getInviteCopyProperties(),
+ supported_components=self.getSupportedComponents() if hasattr(self, "getSupportedComponents") else None,
+ )
+
+
+ @inlineCallbacks
+ def _sendExternalUninvite(self, shareeView):
+
+ yield self._txn.store().conduit.send_shareuninvite(
+ self._txn,
+ shareeView.ownerHome()._homeType,
+ shareeView.ownerHome().uid(),
+ self.bindUID(),
+ shareeView.viewerHome().uid(),
+ shareeView.shareUID(),
+ )
+
+
+ @inlineCallbacks
+ def _replyExternalInvite(self, status, summary=None):
+
+ yield self._txn.store().conduit.send_sharereply(
+ self._txn,
+ self.viewerHome()._homeType,
+ self.ownerHome().uid(),
+ self.viewerHome().uid(),
+ self.shareUID(),
+ status,
+ summary,
+ )
+
+
+ #
+ # Lower level API
+ #
+ @inlineCallbacks
+ def ownerView(self):
+ """
+ Return the owner resource counterpart of this shared resource.
+
+ Note we have to play a trick with the property store to coerce it to match
+ the per-user properties for the owner.
+ """
+ # Get the child of the owner home that has the same resource id as the owned one
+ ownerView = yield self.ownerHome().childWithID(self.id())
+ returnValue(ownerView)
+
+
+ @inlineCallbacks
+ def shareeView(self, shareeUID):
+ """
+ Return the shared resource counterpart of this owned resource for the specified sharee.
+
+ Note we have to play a trick with the property store to coerce it to match
+ the per-user properties for the sharee.
+ """
+
+ # Never return the owner's own resource
+ if self._home.uid() == shareeUID:
+ returnValue(None)
+
+ # Get the child of the sharee home that has the same resource id as the owned one
+ shareeHome = yield self._txn.homeWithUID(self._home._homeType, shareeUID, authzUID=shareeUID)
+ shareeView = (yield shareeHome.allChildWithID(self.id())) if shareeHome is not None else None
+ returnValue(shareeView)
+
+
+ @inlineCallbacks
+ def shareWithUID(self, shareeUID, mode, status=None, summary=None, shareName=None):
+ """
+ Share this (owned) L{CommonHomeChild} with another principal.
+
+ @param shareeUID: The UID of the sharee.
+ @type: L{str}
+
+ @param mode: The sharing mode; L{_BIND_MODE_READ} or
+ L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
+ @type mode: L{str}
+
+ @param status: The sharing status; L{_BIND_STATUS_INVITED} or
+ L{_BIND_STATUS_ACCEPTED}
+ @type: L{str}
+
+ @param summary: The proposed message to go along with the share, which
+ will be used as the default display name.
+ @type: L{str}
+
+ @return: the name of the shared calendar in the new calendar home.
+ @rtype: L{str}
+ """
+ shareeHome = yield self._txn.homeWithUID(self._home._homeType, shareeUID, create=True)
+ returnValue(
+ (yield self.shareWith(shareeHome, mode, status, summary, shareName))
+ )
+
+
+ @inlineCallbacks
+ def shareWith(self, shareeHome, mode, status=None, summary=None, shareName=None):
+ """
+ Share this (owned) L{CommonHomeChild} with another home.
+
+ @param shareeHome: The home of the sharee.
+ @type: L{CommonHome}
+
+ @param mode: The sharing mode; L{_BIND_MODE_READ} or
+ L{_BIND_MODE_WRITE} or L{_BIND_MODE_DIRECT}
+ @type: L{str}
+
+ @param status: The sharing status; L{_BIND_STATUS_INVITED} or
+ L{_BIND_STATUS_ACCEPTED}
+ @type: L{str}
+
+ @param summary: The proposed message to go along with the share, which
+ will be used as the default display name.
+ @type: L{str}
+
+ @param shareName: The proposed name of the new share.
+ @type: L{str}
+
+ @return: the name of the shared calendar in the new calendar home.
+ @rtype: L{str}
+ """
+
+ if status is None:
+ status = _BIND_STATUS_ACCEPTED
+
+ @inlineCallbacks
+ def doInsert(subt):
+ newName = shareName if shareName is not None else self.newShareName()
+ yield self._bindInsertQuery.on(
+ subt,
+ homeID=shareeHome._resourceID,
+ resourceID=self._resourceID,
+ name=newName,
+ mode=mode,
+ bindStatus=status,
+ bindUID=None,
+ message=summary
+ )
+ returnValue(newName)
+ try:
+ bindName = yield self._txn.subtransaction(doInsert)
+ except AllRetriesFailed:
+ # FIXME: catch more specific exception
+ child = yield shareeHome.allChildWithID(self._resourceID)
+ yield self.updateShare(
+ child, mode=mode, status=status,
+ summary=summary
+ )
+ bindName = child._name
+ else:
+ if status == _BIND_STATUS_ACCEPTED:
+ shareeView = yield shareeHome.anyObjectWithShareUID(bindName)
+ yield shareeView._initSyncToken()
+ yield shareeView._initBindRevision()
+
+ # Mark this as shared
+ yield self.setShared(True)
+
+ # Must send notification to ensure cache invalidation occurs
+ yield self.notifyPropertyChanged()
+ yield shareeHome.notifyChanged()
+
+ returnValue(bindName)
+
+
+ @inlineCallbacks
+ def createShare(self, shareeUID, mode, summary=None, shareName=None):
+ """
+ Create a new shared resource. If the mode is direct, the share is created in accepted state,
+ otherwise the share is created in invited state.
+ """
+ shareeHome = yield self._txn.homeWithUID(self.ownerHome()._homeType, shareeUID, create=True)
+
+ yield self.shareWith(
+ shareeHome,
+ mode=mode,
+ status=_BIND_STATUS_INVITED if mode != _BIND_MODE_DIRECT else _BIND_STATUS_ACCEPTED,
+ summary=summary,
+ shareName=shareName,
+ )
+ shareeView = yield self.shareeView(shareeUID)
+ returnValue(shareeView)
+
+
+ @inlineCallbacks
+ def updateShare(self, shareeView, mode=None, status=None, summary=None):
+ """
+ Update share mode, status, and message for a home child shared with
+ this (owned) L{CommonHomeChild}.
+
+ @param shareeView: The sharee home child that shares this.
+ @type shareeView: L{CommonHomeChild}
+
+ @param mode: The sharing mode; L{_BIND_MODE_READ} or
+ L{_BIND_MODE_WRITE} or None to not update
+ @type mode: L{str}
+
+ @param status: The sharing status; L{_BIND_STATUS_INVITED} or
+ L{_BIND_STATUS_ACCEPTED} or L{_BIND_STATUS_DECLINED} or
+ L{_BIND_STATUS_INVALID} or None to not update
+ @type status: L{str}
+
+ @param summary: The proposed message to go along with the share, which
+ will be used as the default display name, or None to not update
+ @type summary: L{str}
+ """
+ # TODO: raise a nice exception if shareeView is not, in fact, a shared
+ # version of this same L{CommonHomeChild}
+
+ # remove None parameters, and substitute None for empty string
+ bind = self._bindSchema
+ columnMap = {}
+ if mode != None and mode != shareeView._bindMode:
+ columnMap[bind.BIND_MODE] = mode
+ if status != None and status != shareeView._bindStatus:
+ columnMap[bind.BIND_STATUS] = status
+ if summary != None and summary != shareeView._bindMessage:
+ columnMap[bind.MESSAGE] = summary
+
+ if columnMap:
+
+ # Count accepted
+ if bind.BIND_STATUS in columnMap:
+ previouslyAcceptedCount = yield shareeView._previousAcceptCount()
+
+ yield self._updateBindColumnsQuery(columnMap).on(
+ self._txn,
+ resourceID=self._resourceID, homeID=shareeView._home._resourceID
+ )
+
+ # Update affected attributes
+ if bind.BIND_MODE in columnMap:
+ shareeView._bindMode = columnMap[bind.BIND_MODE]
+
+ if bind.BIND_STATUS in columnMap:
+ shareeView._bindStatus = columnMap[bind.BIND_STATUS]
+ yield shareeView._changedStatus(previouslyAcceptedCount)
+
+ if bind.MESSAGE in columnMap:
+ shareeView._bindMessage = columnMap[bind.MESSAGE]
+
+ yield shareeView.invalidateQueryCache()
+
+ # Must send notification to ensure cache invalidation occurs
+ yield self.notifyPropertyChanged()
+ yield shareeView.viewerHome().notifyChanged()
+
+
+ def _previousAcceptCount(self):
+ return succeed(1)
+
+
+ @inlineCallbacks
+ def _changedStatus(self, previouslyAcceptedCount):
+ if self._bindStatus == _BIND_STATUS_ACCEPTED:
+ yield self._initSyncToken()
+ yield self._initBindRevision()
+ self._home._children[self._name] = self
+ self._home._children[self._resourceID] = self
+ elif self._bindStatus in (_BIND_STATUS_INVITED, _BIND_STATUS_DECLINED):
+ yield self._deletedSyncToken(sharedRemoval=True)
+ self._home._children.pop(self._name, None)
+ self._home._children.pop(self._resourceID, None)
+
+
+ @inlineCallbacks
+ def removeShare(self, shareeView):
+ """
+ Remove the shared version of this (owned) L{CommonHomeChild} from the
+ referenced L{CommonHome}.
+
+ @see: L{CommonHomeChild.shareWith}
+
+ @param shareeView: The shared resource being removed.
+
+ @return: a L{Deferred} which will fire with the previous shareUID
+ """
+
+ # remove sync tokens
+ shareeHome = shareeView.viewerHome()
+ yield shareeView._deletedSyncToken(sharedRemoval=True)
+ shareeHome._children.pop(shareeView._name, None)
+ shareeHome._children.pop(shareeView._resourceID, None)
+
+ # Must send notification to ensure cache invalidation occurs
+ yield self.notifyPropertyChanged()
+ yield shareeHome.notifyChanged()
+
+ # delete binds including invites
+ yield self._deleteBindForResourceIDAndHomeID.on(
+ self._txn,
+ resourceID=self._resourceID,
+ homeID=shareeHome._resourceID,
+ )
+
+ yield shareeView.invalidateQueryCache()
+
+
+ @inlineCallbacks
+ def unshare(self):
+ """
+ Unshares a collection, regardless of which "direction" it was shared.
+ """
+ if self.owned():
+ # This collection may be shared to others
+ invites = yield self.sharingInvites()
+ for invite in invites:
+ shareeView = yield self.shareeView(invite.shareeUID)
+ yield self.removeShare(shareeView)
+ else:
+ # This collection is shared to me
+ ownerView = yield self.ownerView()
+ yield ownerView.removeShare(self)
+
+
+ @inlineCallbacks
+ def sharingInvites(self):
+ """
+ Retrieve the list of all L{SharingInvitation}s for this L{CommonHomeChild}, irrespective of mode.
+
+ @return: L{SharingInvitation} objects
+ @rtype: a L{Deferred} which fires with a L{list} of L{SharingInvitation}s.
+ """
+ if not self.owned():
+ returnValue([])
+
+ # get all accepted binds
+ invitedRows = yield self._sharedInvitationBindForResourceID.on(
+ self._txn, resourceID=self._resourceID, homeID=self._home._resourceID
+ )
+
+ result = []
+ for homeUID, homeRID, _ignore_resourceID, resourceName, bindMode, bindStatus, bindMessage in invitedRows:
+ invite = SharingInvitation(
+ resourceName,
+ self.ownerHome().name(),
+ self.ownerHome().id(),
+ homeUID,
+ homeRID,
+ bindMode,
+ bindStatus,
+ bindMessage,
+ )
+ result.append(invite)
+ returnValue(result)
+
+
+ @inlineCallbacks
+ def sharingBindRecords(self):
+ """
+ Return a L{dict} that maps home/directory uid to a sharing bind record.
+ """
+ if not self.owned():
+ returnValue({})
+
+ records = yield self._bindRecordClass.querysimple(
+ self._txn,
+ **{self._bindResourceIDAttributeName: self.id()}
+ )
+ homeIDs = [getattr(record, self._bindHomeIDAttributeName) for record in records]
+ homes = yield self._homeRecordClass.query(
+ self._txn,
+ self._homeRecordClass.resourceID.In(homeIDs),
+ )
+ homeMap = dict((home.resourceID, home.ownerUID,) for home in homes)
+
+ returnValue(dict([(homeMap[getattr(record, self._bindHomeIDAttributeName)], record,) for record in records if record.bindMode != _BIND_MODE_OWN]))
+
+
+ def migrateBindRecords(self, bindUID):
+ """
+ The user that owns this collection is being migrated to another pod. We need to switch over
+ the sharing details to point to the new external user.
+ """
+ if self.owned():
+ return self.migrateSharedByRecords(bindUID)
+ else:
+ return self.migrateSharedToRecords()
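+
+ # Illustrative sketch: during a pod migration the migration machinery would
+ # call this once per collection, e.g.
+ #
+ # yield calendar.migrateBindRecords(bindUID)
+ #
+ # dispatching to migrateSharedByRecords() when the migrating user owns the
+ # collection and to migrateSharedToRecords() when it is shared to them
+ # ("calendar" and "bindUID" are hypothetical values for illustration).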
+
+
+ @inlineCallbacks
+ def migrateSharedByRecords(self, bindUID):
+ """
+ The user that owns this collection is being migrated to another pod. We need to switch over
+ the sharing details to point to the new external user. For sharees hosted on this pod, we
+ update their bind record to point to a new external home/calendar for the sharer. For sharees
+ hosted on other pods, we simply remove their bind entries.
+ """
+
+ # Get the external home and make sure there is a "fake" calendar associated with it
+ home = yield self.externalHome()
+ calendar = yield home.childWithBindUID(bindUID)
+ if calendar is None:
+ calendar = yield home.createCollectionForExternalShare(
+ self.name(),
+ bindUID,
+ self.getSupportedComponents() if hasattr(self, "getSupportedComponents") else None,
+ )
+
+ remaining = False
+ records = yield self._bindRecordClass.querysimple(self._txn, **{self._bindResourceIDAttributeName: self.id()})
+ for record in records:
+ if record.bindMode == _BIND_MODE_OWN:
+ continue
+ shareeHome = yield self._txn.homeWithResourceID(home._homeType, getattr(record, self._bindHomeIDAttributeName))
+ if shareeHome.normal():
+ remaining = True
+ yield record.update(**{
+ self._bindResourceIDAttributeName: calendar.id(),
+ })
+ else:
+ # It is OK to just delete (as opposed to doing a full "unshare") without adjusting other things
+ # like sync revisions since those would not have been used for an external share anyway. Also,
+ # revisions are tied to the calendar id and the original calendar will be removed after migration
+ # is complete.
+ yield record.delete()
+
+ # If there are no external shares remaining, we can remove the external calendar
+ if not remaining:
+ yield calendar.remove()
+
+
+ @inlineCallbacks
+ def migrateSharedToRecords(self):
+ """
+ The user that owns this collection is being migrated to another pod. We need to switch over
+ the sharing details to point to the new external user.
+ """
+
+ # Update the bind record for this calendar to point to the external home
+ records = yield self._bindRecordClass.querysimple(
+ self._txn,
+ **{
+ self._bindHomeIDAttributeName: self.viewerHome().id(),
+ self._bindResourceIDAttributeName: self.id(),
+ }
+ )
+
+ if len(records) == 1:
+
+ # What we do depends on whether the sharer is local to this pod or not
+ if self.ownerHome().normal():
+ # Get the external home for the sharee
+ home = yield self.externalHome()
+
+ yield records[0].update(**{
+ self._bindHomeIDAttributeName: home.id(),
+ })
+ else:
+ # It is OK to just delete (as opposed to doing a full "unshare") without adjusting other things
+ # like sync revisions since those would not have been used for an external share anyway. Also,
+ # revisions are tied to the sharee calendar home id and that will be removed after migration
+ # is complete.
+ yield records[0].delete()
+
+ # Clean up external calendar if no sharees left
+ calendar = yield self.ownerView()
+ invites = yield calendar.sharingInvites()
+ if len(invites) == 0:
+ yield calendar.remove()
+ else:
+ raise AssertionError("We must have a bind record for this calendar.")
+
+
+ def externalHome(self):
+ """
+ Create and return a L{CommonHome} for the user being migrated. Note that when called, the user
+ directory record may still indicate that they are hosted on this pod, so we have to forcibly create
+ a home for the external user.
+ """
+ currentHome = self.viewerHome()
+ return self._txn.homeWithUID(currentHome._homeType, currentHome.uid(), status=_HOME_STATUS_EXTERNAL, create=True)
+
+
+ @inlineCallbacks
+ def _initBindRevision(self):
+ yield self.syncToken() # init self._syncTokenRevision if None
+ self._bindRevision = self._syncTokenRevision
+
+ bind = self._bindSchema
+ yield self._updateBindColumnsQuery(
+ {bind.BIND_REVISION : Parameter("revision"), }
+ ).on(
+ self._txn,
+ revision=self._bindRevision,
+ resourceID=self._resourceID,
+ homeID=self.viewerHome()._resourceID,
+ )
+ yield self.invalidateQueryCache()
+
+
+ def sharedResourceType(self):
+ """
+ The sharing resource type. Needs to be overridden by each type of resource that can be shared.
+
+ @return: an identifier for the type of the share.
+ @rtype: C{str}
+ """
+ return ""
+
+
+ def newShareName(self):
+ """
+ Name used when creating a new share. By default this is a UUID.
+ """
+ return str(uuid4())
+
+
+ def owned(self):
+ """
+ @see: L{ICalendar.owned}
+ """
+ return self._bindMode == _BIND_MODE_OWN
+
+
+ def isShared(self):
+ """
+ For an owned collection indicate whether it is shared.
+
+ @return: C{True} if shared, C{False} otherwise
+ @rtype: C{bool}
+ """
+ return self.owned() and self._bindMessage == "shared"
+
+
+ @inlineCallbacks
+ def setShared(self, shared):
+ """
+ Set an owned collection to shared or unshared state. Technically this is not useful as "shared"
+ really means it has invitees, but the current sharing spec supports a notion of a shared collection
+ that has not yet had invitees added. For the time being we will support that option by using a new
+ MESSAGE value to indicate an owned collection that is "shared".
+
+ @param shared: whether or not the owned collection is "shared"
+ @type shared: C{bool}
+ """
+ assert self.owned(), "Cannot change share mode on a shared collection"
+
+ # Only if change is needed
+ newMessage = "shared" if shared else None
+ if self._bindMessage == newMessage:
+ returnValue(None)
+
+ self._bindMessage = newMessage
+
+ bind = self._bindSchema
+ yield Update(
+ {bind.MESSAGE: self._bindMessage},
+ Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
+ bind.HOME_RESOURCE_ID == Parameter("homeID")),
+ ).on(self._txn, resourceID=self._resourceID, homeID=self.viewerHome()._resourceID)
+
+ yield self.invalidateQueryCache()
+ yield self.notifyPropertyChanged()
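+
+ # Illustrative sketch with a hypothetical owned calendar "calendar":
+ #
+ # yield calendar.setShared(True)
+ # calendar.isShared() # True while the bind MESSAGE is "shared"
+ # yield calendar.setShared(False) # resets MESSAGE to None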
+
+
+ def direct(self):
+ """
+ Is this a "direct" share?
+
+ @return: a boolean indicating whether it's direct.
+ """
+ return self._bindMode == _BIND_MODE_DIRECT
+
+
+ def indirect(self):
+ """
+ Is this an "indirect" share?
+
+ @return: a boolean indicating whether it's indirect.
+ """
+ return self._bindMode == _BIND_MODE_INDIRECT
+
+
+ def shareUID(self):
+ """
+ @see: L{ICalendar.shareUID}
+ """
+ return self.name()
+
+
+ def shareMode(self):
+ """
+ @see: L{ICalendar.shareMode}
+ """
+ return self._bindMode
+
+
+ def _effectiveShareMode(self, bindMode, viewerUID, txn):
+ """
+ Get the effective share mode without a calendar object
+ """
+ return bindMode
+
+
+ def effectiveShareMode(self):
+ """
+ @see: L{ICalendar.shareMode}
+ """
+ return self._bindMode
+
+
+ def shareName(self):
+ """
+ This is a path-like name for the resource within the home being shared. For object resource
+ shares this will be a combination of the L{CommonHomeChild} name and the L{CommonObjectResource}
+ name. Otherwise it is just the L{CommonHomeChild} name. This is needed to expose a value to the
+ app-layer such that it can construct a URI for the actual WebDAV resource being shared.
+ """
+ name = self.name()
+ if self.sharedResourceType() == "group":
+ name = self.parentCollection().name() + "/" + name
+ return name
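+
+ # Illustrative sketch: for a hypothetical "group" share inside a parent
+ # collection named "book1", a child named "group1" is exposed as
+ # shareName() == "book1/group1"; every other resource type simply returns
+ # self.name().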
+
+
+ def shareStatus(self):
+ """
+ @see: L{ICalendar.shareStatus}
+ """
+ return self._bindStatus
+
+
+ def bindUID(self):
+ """
+ @see: L{ICalendar.bindUID}
+ """
+ return self._bindUID
+
+
+ def accepted(self):
+ """
+ @see: L{ICalendar.shareStatus}
+ """
+ return self._bindStatus == _BIND_STATUS_ACCEPTED
+
+
+ def shareMessage(self):
+ """
+ @see: L{ICalendar.shareMessage}
+ """
+ return self._bindMessage
+
+
+ def getInviteCopyProperties(self):
+ """
+ Get a dictionary of property name/values (as strings) for properties that are shadowable and
+ need to be copied to a sharee's collection when an external (cross-pod) share is created.
+ Sub-classes should override to expose the properties they care about.
+ """
+ return {}
+
+
+ def setInviteCopyProperties(self, props):
+ """
+ Copy a set of shadowable properties (as name/value strings) onto this shared resource when
+ a cross-pod invite is processed. Sub-classes should override to expose the properties they
+ care about.
+ """
+ pass
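+
+ # Illustrative sketch of a subclass override, using a hypothetical shadowable
+ # display-name property:
+ #
+ # def getInviteCopyProperties(self):
+ # return {"{DAV:}displayname": self.displayName()}
+ #
+ # def setInviteCopyProperties(self, props):
+ # self.setDisplayName(props.get("{DAV:}displayname", ""))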
+
+
+ @classmethod
+ def metadataColumns(cls):
+ """
+ Return a list of column names for retrieval of metadata. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ # Common behavior is to have created and modified
+
+ return (
+ cls._homeChildMetaDataSchema.CREATED,
+ cls._homeChildMetaDataSchema.MODIFIED,
+ )
+
+
+ @classmethod
+ def metadataAttributes(cls):
+ """
+ Return a list of attribute names for retrieval of metadata. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ # Common behavior is to have created and modified
+
+ return (
+ "_created",
+ "_modified",
+ )
+
+
+ @classmethod
+ def bindColumns(cls):
+ """
+ Return a list of column names for retrieval during creation. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ return (
+ cls._bindSchema.HOME_RESOURCE_ID,
+ cls._bindSchema.RESOURCE_ID,
+ cls._bindSchema.RESOURCE_NAME,
+ cls._bindSchema.BIND_MODE,
+ cls._bindSchema.BIND_STATUS,
+ cls._bindSchema.BIND_REVISION,
+ cls._bindSchema.BIND_UID,
+ cls._bindSchema.MESSAGE
+ )
+
+
+ @classmethod
+ def bindAttributes(cls):
+ """
+ Return a list of attribute names for retrieval during creation. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ return (
+ "_homeResourceID",
+ "_resourceID",
+ "_name",
+ "_bindMode",
+ "_bindStatus",
+ "_bindRevision",
+ "_bindUID",
+ "_bindMessage",
+ )
+
+ bindColumnCount = 8
+
+ @classmethod
+ def additionalBindColumns(cls):
+ """
+ Return a list of column names for retrieval during creation. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ return ()
+
+
+ @classmethod
+ def additionalBindAttributes(cls):
+ """
+ Return a list of attribute names for retrieval during creation. This allows
+ different child classes to have their own type specific data, but still make use of the
+ common base logic.
+ """
+
+ return ()
+
+
+ @classproperty
+ def _childrenAndMetadataForHomeID(cls):
+ bind = cls._bindSchema
+ child = cls._homeChildSchema
+ childMetaData = cls._homeChildMetaDataSchema
+
+ columns = cls.bindColumns() + cls.additionalBindColumns() + cls.metadataColumns()
+ return Select(
+ columns,
+ From=child.join(
+ bind, child.RESOURCE_ID == bind.RESOURCE_ID,
+ 'left outer').join(
+ childMetaData, childMetaData.RESOURCE_ID == bind.RESOURCE_ID,
+ 'left outer'),
+ Where=(bind.HOME_RESOURCE_ID == Parameter("homeID")).And(
+ bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
+ )
+
+
+ @classmethod
+ def _revisionsForResourceIDs(cls, resourceIDs):
+ rev = cls._revisionsSchema
+ return Select(
+ [rev.RESOURCE_ID, Max(rev.REVISION)],
+ From=rev,
+ Where=rev.RESOURCE_ID.In(Parameter("resourceIDs", len(resourceIDs))).And(
+ (rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
+ GroupBy=rev.RESOURCE_ID
+ )
+
+
+ @inlineCallbacks
+ def invalidateQueryCache(self):
+ queryCacher = self._txn._queryCacher
+ if queryCacher is not None:
+ yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
+ yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
+ yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
+ yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithBindUID(self._home._resourceID, self._bindUID))
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_tablespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/sql_tables.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_tables.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/sql_tables.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -187,6 +187,8 @@
</span><span class="cx"> _HOME_STATUS_NORMAL = _homeStatus('normal')
</span><span class="cx"> _HOME_STATUS_EXTERNAL = _homeStatus('external')
</span><span class="cx"> _HOME_STATUS_PURGING = _homeStatus('purging')
</span><ins>+_HOME_STATUS_MIGRATING = _homeStatus('migrating')
+_HOME_STATUS_DISABLED = _homeStatus('disabled')
</ins><span class="cx">
</span><span class="cx"> _bindStatus = _schemaConstants(
</span><span class="cx"> schema.CALENDAR_BIND_STATUS.DESCRIPTION,
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoresql_utilpyfromrev14520CalendarServerbranchesuserscdaboopod2podmigrationtxdavcommondatastoresql_utilpy"></a>
<div class="copfile"><h4>Copied: CalendarServer/trunk/txdav/common/datastore/sql_util.py (from rev 14520, CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_util.py) (0 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/sql_util.py         (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/sql_util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -0,0 +1,837 @@
</span><ins>+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.syntax import Max, Select, Parameter, Delete, Insert, \
+ Update, ColumnSyntax, TableSyntax, Upper
+from twext.python.clsprop import classproperty
+from twext.python.log import Logger
+from twisted.internet.defer import succeed, inlineCallbacks, returnValue
+from txdav.base.datastore.util import normalizeUUIDOrNot
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.icommondatastore import SyncTokenValidException, \
+ ENOTIFICATIONTYPE, ECALENDARTYPE, EADDRESSBOOKTYPE
+import time
+from uuid import UUID
+
+log = Logger()
+
+
+"""
+Classes and methods for the SQL store.
+"""
+
+class _EmptyCacher(object):
+
+ def set(self, key, value):
+ return succeed(True)
+
+
+ def get(self, key, withIdentifier=False):
+ return succeed(None)
+
+
+ def delete(self, key):
+ return succeed(True)
+
+
+
+class _SharedSyncLogic(object):
+ """
+ Logic for maintaining sync-token shared between notification collections and
+ shared collections.
+ """
+
+ @classproperty
+ def _childSyncTokenQuery(cls):
+ """
+ DAL query for retrieving the sync token of a L{CommonHomeChild} based on
+ its resource ID.
+ """
+ rev = cls._revisionsSchema
+ return Select([Max(rev.REVISION)], From=rev,
+ Where=rev.RESOURCE_ID == Parameter("resourceID"))
+
+
+ def revisionFromToken(self, token):
+ if token is None:
+ return 0
+ elif isinstance(token, str) or isinstance(token, unicode):
+ _ignore_uuid, revision = token.split("_", 1)
+ return int(revision)
+ else:
+ return token
+
+
+ @inlineCallbacks
+ def syncToken(self):
+ if self._syncTokenRevision is None:
+ self._syncTokenRevision = yield self.syncTokenRevision()
+ returnValue(("%s_%s" % (self._resourceID, self._syncTokenRevision,)))
+
+
+ @inlineCallbacks
+ def syncTokenRevision(self):
+ revision = (yield self._childSyncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
+ if revision is None:
+ revision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
+ returnValue(revision)
+
+
+ def objectResourcesSinceToken(self, token):
+ raise NotImplementedError()
+
+
+ @classmethod
+ def _objectNamesSinceRevisionQuery(cls, deleted=True):
+ """
+ DAL query for (resource, deleted-flag)
+ """
+ rev = cls._revisionsSchema
+ where = (rev.REVISION > Parameter("revision")).And(rev.RESOURCE_ID == Parameter("resourceID"))
+ if not deleted:
+ where = where.And(rev.DELETED == False)
+ return Select(
+ [rev.RESOURCE_NAME, rev.DELETED],
+ From=rev,
+ Where=where,
+ )
+
+
+ def resourceNamesSinceToken(self, token):
+ """
+ Return the changed and deleted resources since a particular sync-token. This simply extracts
+ the revision from the token and then calls L{resourceNamesSinceRevision}.
+
+ @param token: the sync-token to determine changes since
+ @type token: C{str}
+ """
+
+ return self.resourceNamesSinceRevision(self.revisionFromToken(token))
+
+
+ @inlineCallbacks
+ def resourceNamesSinceRevision(self, revision):
+ """
+ Return the changed and deleted resources since a particular revision.
+
+ @param revision: the revision to determine changes since
+ @type revision: C{int}
+ """
+ changed = []
+ deleted = []
+ invalid = []
+ if revision:
+ minValidRevision = yield self._txn.calendarserverValue("MIN-VALID-REVISION")
+ if revision < int(minValidRevision):
+ raise SyncTokenValidException
+
+ results = [
+ (name if name else "", removed) for name, removed in (
+ yield self._objectNamesSinceRevisionQuery().on(
+ self._txn, revision=revision, resourceID=self._resourceID)
+ )
+ ]
+ results.sort(key=lambda x: x[1])
+
+ for name, wasdeleted in results:
+ if name:
+ if wasdeleted:
+ deleted.append(name)
+ else:
+ changed.append(name)
+ else:
+ changed = yield self.listObjectResources()
+
+ returnValue((changed, deleted, invalid))
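+
+ # Illustrative sketch: callers normally go through resourceNamesSinceToken(),
+ # e.g. with a hypothetical token "42_107":
+ #
+ # changed, deleted, invalid = yield child.resourceNamesSinceToken("42_107")
+ #
+ # A zero/None revision lists every current resource as "changed"; a revision
+ # older than MIN-VALID-REVISION raises SyncTokenValidException.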
+
+
+ @classproperty
+ def _removeDeletedRevision(cls):
+ rev = cls._revisionsSchema
+ return Delete(From=rev,
+ Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
+ rev.COLLECTION_NAME == Parameter("collectionName")))
+
+
+ @classproperty
+ def _addNewRevision(cls):
+ rev = cls._revisionsSchema
+ return Insert(
+ {
+ rev.HOME_RESOURCE_ID: Parameter("homeID"),
+ rev.RESOURCE_ID: Parameter("resourceID"),
+ rev.COLLECTION_NAME: Parameter("collectionName"),
+ rev.RESOURCE_NAME: None,
+ # Always starts false; may be updated to be a tombstone
+ # later.
+ rev.DELETED: False
+ },
+ Return=[rev.REVISION]
+ )
+
+
+ @inlineCallbacks
+ def _initSyncToken(self):
+ yield self._removeDeletedRevision.on(
+ self._txn, homeID=self._home._resourceID, collectionName=self._name
+ )
+ self._syncTokenRevision = (yield (
+ self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
+ resourceID=self._resourceID,
+ collectionName=self._name)))[0][0]
+ self._txn.bumpRevisionForObject(self)
+
+
+ @classproperty
+ def _renameSyncTokenQuery(cls):
+ """
+ DAL query to change sync token for a rename (increment and adjust
+ resource name).
+ """
+ rev = cls._revisionsSchema
+ return Update(
+ {
+ rev.REVISION: schema.REVISION_SEQ,
+ rev.COLLECTION_NAME: Parameter("name")
+ },
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
+ (rev.RESOURCE_NAME == None),
+ Return=rev.REVISION
+ )
+
+
+ @inlineCallbacks
+ def _renameSyncToken(self):
+ rows = yield self._renameSyncTokenQuery.on(
+ self._txn, name=self._name, resourceID=self._resourceID)
+ if rows:
+ self._syncTokenRevision = rows[0][0]
+ self._txn.bumpRevisionForObject(self)
+ else:
+ yield self._initSyncToken()
+
+
+ @classproperty
+ def _bumpSyncTokenQuery(cls):
+ """
+ DAL query to change collection sync token. Note this can impact multiple rows if the
+ collection is shared.
+ """
+ rev = cls._revisionsSchema
+ return Update(
+ {rev.REVISION: schema.REVISION_SEQ, },
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
+ (rev.RESOURCE_NAME == None)
+ )
+
+
+ @inlineCallbacks
+ def _bumpSyncToken(self):
+
+ if not self._txn.isRevisionBumpedAlready(self):
+ self._txn.bumpRevisionForObject(self)
+ yield self._bumpSyncTokenQuery.on(
+ self._txn,
+ resourceID=self._resourceID,
+ )
+ self._syncTokenRevision = None
+
+
+ @classproperty
+ def _deleteSyncTokenQuery(cls):
+ """
+ DAL query to remove all child revision information. The revision for the collection
+ itself is not touched.
+ """
+ rev = cls._revisionsSchema
+ return Delete(
+ From=rev,
+ Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And
+ (rev.RESOURCE_ID == Parameter("resourceID")).And
+ (rev.COLLECTION_NAME == None)
+ )
+
+
+ @classproperty
+ def _sharedRemovalQuery(cls):
+ """
+ DAL query to indicate a shared collection has been deleted.
+ """
+ rev = cls._revisionsSchema
+ return Update(
+ {
+ rev.RESOURCE_ID: None,
+ rev.REVISION: schema.REVISION_SEQ,
+ rev.DELETED: True
+ },
+ Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
+ rev.RESOURCE_ID == Parameter("resourceID")).And(
+ rev.RESOURCE_NAME == None)
+ )
+
+
+ @classproperty
+ def _unsharedRemovalQuery(cls):
+ """
+ DAL query to indicate an owned collection has been deleted.
+ """
+ rev = cls._revisionsSchema
+ return Update(
+ {
+ rev.RESOURCE_ID: None,
+ rev.REVISION: schema.REVISION_SEQ,
+ rev.DELETED: True
+ },
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+ rev.RESOURCE_NAME == None),
+ )
+
+
+ @inlineCallbacks
+ def _deletedSyncToken(self, sharedRemoval=False):
+ """
+ When a collection is deleted we remove all the revision information for its child resources.
+ We update the collection's sync token to indicate it has been deleted - that way a sync on
+ the home collection can report the deletion of the collection.
+
+ @param sharedRemoval: indicates whether the collection being removed is shared
+ @type sharedRemoval: L{bool}
+ """
+ # Remove all child entries
+ yield self._deleteSyncTokenQuery.on(self._txn,
+ homeID=self._home._resourceID,
+ resourceID=self._resourceID)
+
+ # If this is a share being removed then we only mark this one specific
+ # home/resource-id as being deleted. On the other hand, if it is a
+ # non-shared collection, then we need to mark all collections
+ # with the resource-id as being deleted to account for direct shares.
+ if sharedRemoval:
+ yield self._sharedRemovalQuery.on(self._txn,
+ homeID=self._home._resourceID,
+ resourceID=self._resourceID)
+ else:
+ yield self._unsharedRemovalQuery.on(self._txn,
+ resourceID=self._resourceID)
+ self._syncTokenRevision = None
+
+
+ def _insertRevision(self, name):
+ return self._changeRevision("insert", name)
+
+
+ def _updateRevision(self, name):
+ return self._changeRevision("update", name)
+
+
+ def _deleteRevision(self, name):
+ return self._changeRevision("delete", name)
+
+
+ @classproperty
+ def _deleteBumpTokenQuery(cls):
+ rev = cls._revisionsSchema
+ return Update(
+ {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: True},
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+ rev.RESOURCE_NAME == Parameter("name")),
+ Return=rev.REVISION
+ )
+
+
+ @classproperty
+ def _updateBumpTokenQuery(cls):
+ rev = cls._revisionsSchema
+ return Update(
+ {rev.REVISION: schema.REVISION_SEQ},
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+ rev.RESOURCE_NAME == Parameter("name")),
+ Return=rev.REVISION
+ )
+
+
+ @classproperty
+ def _insertFindPreviouslyNamedQuery(cls):
+ rev = cls._revisionsSchema
+ return Select(
+ [rev.RESOURCE_ID],
+ From=rev,
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+ rev.RESOURCE_NAME == Parameter("name"))
+ )
+
+
+ @classproperty
+ def _updatePreviouslyNamedQuery(cls):
+ rev = cls._revisionsSchema
+ return Update(
+ {rev.REVISION: schema.REVISION_SEQ, rev.DELETED: False},
+ Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+ rev.RESOURCE_NAME == Parameter("name")),
+ Return=rev.REVISION
+ )
+
+
+ @classproperty
+ def _completelyNewRevisionQuery(cls):
+ rev = cls._revisionsSchema
+ return Insert(
+ {
+ rev.HOME_RESOURCE_ID: Parameter("homeID"),
+ rev.RESOURCE_ID: Parameter("resourceID"),
+ rev.RESOURCE_NAME: Parameter("name"),
+ rev.REVISION: schema.REVISION_SEQ,
+ rev.DELETED: False
+ },
+ Return=rev.REVISION
+ )
+
+
+ @classproperty
+ def _completelyNewDeletedRevisionQuery(cls):
+ rev = cls._revisionsSchema
+ return Insert(
+ {
+ rev.HOME_RESOURCE_ID: Parameter("homeID"),
+ rev.RESOURCE_ID: Parameter("resourceID"),
+ rev.RESOURCE_NAME: Parameter("name"),
+ rev.REVISION: schema.REVISION_SEQ,
+ rev.DELETED: True
+ },
+ Return=rev.REVISION
+ )
+
+
+ @inlineCallbacks
+ def _changeRevision(self, action, name):
+
+ # Need to handle the case where for some reason the revision entry is
+ # actually missing. For a "delete" we don't care, for an "update" we
+ # will turn it into an "insert".
+ if action == "delete":
+ rows = (
+ yield self._deleteBumpTokenQuery.on(
+ self._txn, resourceID=self._resourceID, name=name))
+ if rows:
+ self._syncTokenRevision = rows[0][0]
+ else:
+ self._syncTokenRevision = (
+ yield self._completelyNewDeletedRevisionQuery.on(
+ self._txn, homeID=self.ownerHome()._resourceID,
+ resourceID=self._resourceID, name=name)
+ )[0][0]
+
+ elif action == "update":
+ rows = (
+ yield self._updateBumpTokenQuery.on(
+ self._txn, resourceID=self._resourceID, name=name))
+ if rows:
+ self._syncTokenRevision = rows[0][0]
+ else:
+ self._syncTokenRevision = (
+ yield self._completelyNewRevisionQuery.on(
+ self._txn, homeID=self.ownerHome()._resourceID,
+ resourceID=self._resourceID, name=name)
+ )[0][0]
+
+ elif action == "insert":
+ # Note that an "insert" may happen for a resource that previously
+ # existed and then was deleted. In that case an entry in the
+ # REVISIONS table still exists so we have to detect that and do db
+ # INSERT or UPDATE as appropriate
+
+ found = bool((
+ yield self._insertFindPreviouslyNamedQuery.on(
+ self._txn, resourceID=self._resourceID, name=name)))
+ if found:
+ self._syncTokenRevision = (
+ yield self._updatePreviouslyNamedQuery.on(
+ self._txn, resourceID=self._resourceID, name=name)
+ )[0][0]
+ else:
+ self._syncTokenRevision = (
+ yield self._completelyNewRevisionQuery.on(
+ self._txn, homeID=self.ownerHome()._resourceID,
+ resourceID=self._resourceID, name=name)
+ )[0][0]
+ yield self._maybeNotify()
+ returnValue(self._syncTokenRevision)
+
+
+ def _maybeNotify(self):
+ """
+ Maybe notify changed. (Overridden in NotificationCollection.)
+ """
+ return succeed(None)
+
+
+
+def determineNewest(uid, homeType):
+ """
+ Construct a query to determine the modification time of the newest object
+ in a given home.
+
+ @param uid: the UID of the home to scan.
+ @type uid: C{str}
+
+ @param homeType: The type of home to scan; C{ECALENDARTYPE},
+ C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
+ @type homeType: C{int}
+
+ @return: A select query that will return a single row containing a single
+ column which is the maximum value.
+ @rtype: L{Select}
+ """
+ if homeType == ENOTIFICATIONTYPE:
+ return Select(
+ [Max(schema.NOTIFICATION.MODIFIED)],
+ From=schema.NOTIFICATION_HOME.join(
+ schema.NOTIFICATION,
+ on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
+ schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
+ Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
+ )
+ homeTypeName = {ECALENDARTYPE: "CALENDAR",
+ EADDRESSBOOKTYPE: "ADDRESSBOOK"}[homeType]
+ home = getattr(schema, homeTypeName + "_HOME")
+ bind = getattr(schema, homeTypeName + "_BIND")
+ child = getattr(schema, homeTypeName)
+ obj = getattr(schema, homeTypeName + "_OBJECT")
+ return Select(
+ [Max(obj.MODIFIED)],
+ From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
+ child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
+ obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
+ Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
+ )
+
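+# Illustrative sketch: the query is run against a transaction and yields a single
+# row with a single column, e.g. for a hypothetical home UID "uid":
+#
+# rows = yield determineNewest(uid, ECALENDARTYPE).on(sqlTxn)
+# latestModified = rows[0][0]
+#
+# mergeHomes() below uses this query to decide which duplicate home is newer.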
+
+
+@inlineCallbacks
+def mergeHomes(sqlTxn, one, other, homeType):
+ """
+ Merge two homes together. This determines which of C{one} or C{other} is
+ newer - that is, has been modified more recently - and pulls all the data
+ from the older into the newer home. Then it changes the older home's UID to
+ its normalized form prefixed with "old.", and renames the newer home's UID
+ to its normalized form.
+
+ Because the UIDs of both homes have changed, B{both one and other will be
+ invalid to all other callers from the start of the invocation of this
+ function}.
+
+ @param sqlTxn: the transaction to use
+ @type sqlTxn: A L{CommonTransaction}
+
+ @param one: A calendar home.
+ @type one: L{ICalendarHome}
+
+ @param other: Another, different calendar home.
+ @type other: L{ICalendarHome}
+
+ @param homeType: The type of home to scan; L{ECALENDARTYPE} or
+ L{EADDRESSBOOKTYPE}.
+ @type homeType: C{int}
+
+ @return: a L{Deferred} which fires with the newer of C{one} or C{other},
+ into which the data from the other home has been merged, when the merge
+ is complete.
+ """
+ from txdav.caldav.datastore.util import migrateHome as migrateCalendarHome
+ from txdav.carddav.datastore.util import migrateHome as migrateABHome
+ migrateHome = {EADDRESSBOOKTYPE: migrateABHome,
+ ECALENDARTYPE: migrateCalendarHome,
+ ENOTIFICATIONTYPE: _dontBotherWithNotifications}[homeType]
+ homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
+ ECALENDARTYPE: schema.CALENDAR_HOME,
+ ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
+ both = []
+ both.append([one,
+ (yield determineNewest(one.uid(), homeType).on(sqlTxn))])
+ both.append([other,
+ (yield determineNewest(other.uid(), homeType).on(sqlTxn))])
+ both.sort(key=lambda x: x[1])
+
+ older = both[0][0]
+ newer = both[1][0]
+ yield migrateHome(older, newer, merge=True)
+ # Rename the old one to 'old.<correct-guid>'
+ newNormalized = normalizeUUIDOrNot(newer.uid())
+ oldNormalized = normalizeUUIDOrNot(older.uid())
+ yield _renameHome(sqlTxn, homeTable, older.uid(), "old." + oldNormalized)
+ # Rename the new one to '<correct-guid>'
+ if newer.uid() != newNormalized:
+ yield _renameHome(sqlTxn, homeTable, newer.uid(), newNormalized)
+ yield returnValue(newer)
+
+
+
+def _renameHome(txn, table, oldUID, newUID):
+ """
+ Rename a calendar, addressbook, or notification home. Note that this
+ function is only safe in transactions that have had caching disabled, and
+ more specifically should only ever be used during upgrades. Running this
+ in a normal transaction will have unpredictable consequences, especially
+ with respect to memcache.
+
+ @param txn: an SQL transaction to use for this update
+ @type txn: L{twext.enterprise.ienterprise.IAsyncTransaction}
+
+ @param table: the storage table of the desired home type
+ @type table: L{TableSyntax}
+
+ @param oldUID: the old UID, the existing home's UID
+ @type oldUID: L{str}
+
+ @param newUID: the new UID, to change the UID to
+ @type newUID: L{str}
+
+ @return: a L{Deferred} which fires when the home is renamed.
+ """
+ return Update({table.OWNER_UID: newUID},
+ Where=table.OWNER_UID == oldUID).on(txn)
+
+
+
+def _dontBotherWithNotifications(older, newer, merge):
+ """
+ Notifications are more transient and can be easily worked around; don't
+ bother to migrate all of them when there is a UUID case mismatch.
+ """
+ pass
+
+
+
+@inlineCallbacks
+def _normalizeHomeUUIDsIn(t, homeType):
+ """
+ Normalize the UUIDs in the given L{txdav.common.datastore.CommonStore}.
+
+ This changes the case of the UUIDs in the calendar home.
+
+ @param t: the transaction to normalize all the UUIDs in.
+ @type t: L{CommonStoreTransaction}
+
+ @param homeType: The type of home to scan, L{ECALENDARTYPE},
+ L{EADDRESSBOOKTYPE}, or L{ENOTIFICATIONTYPE}.
+ @type homeType: C{int}
+
+ @return: a L{Deferred} which fires with C{None} when the UUID normalization
+ is complete.
+ """
+ from txdav.caldav.datastore.util import fixOneCalendarHome
+ homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
+ ECALENDARTYPE: schema.CALENDAR_HOME,
+ ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
+ homeTypeName = homeTable.model.name.split("_")[0]
+
+ allUIDs = yield Select([homeTable.OWNER_UID],
+ From=homeTable,
+ OrderBy=homeTable.OWNER_UID).on(t)
+ total = len(allUIDs)
+ allElapsed = []
+ for n, [UID] in enumerate(allUIDs):
+ start = time.time()
+ if allElapsed:
+ estimate = "%0.3d" % ((sum(allElapsed) / len(allElapsed)) *
+ total - n)
+ else:
+ estimate = "unknown"
+ log.info(
+ "Scanning UID {uid} [{homeType}] "
+ "({pct!0.2d}%, {estimate} seconds remaining)...",
+ uid=UID, pct=(n / float(total)) * 100, estimate=estimate,
+ homeType=homeTypeName
+ )
+ other = None
+ this = yield _getHome(t, homeType, UID)
+ if homeType == ECALENDARTYPE:
+ fixedThisHome = yield fixOneCalendarHome(this)
+ else:
+ fixedThisHome = 0
+ fixedOtherHome = 0
+ if this is None:
+ log.info(
+ "{uid!r} appears to be missing, already processed", uid=UID
+ )
+ try:
+ uuidobj = UUID(UID)
+ except ValueError:
+ pass
+ else:
+ newname = str(uuidobj).upper()
+ if UID != newname:
+ log.info(
+ "Detected case variance: {uid} {newuid}[{homeType}]",
+ uid=UID, newuid=newname, homeType=homeTypeName
+ )
+ other = yield _getHome(t, homeType, newname)
+ if other is None:
+ # No duplicate: just fix the name.
+ yield _renameHome(t, homeTable, UID, newname)
+ else:
+ if homeType == ECALENDARTYPE:
+ fixedOtherHome = yield fixOneCalendarHome(other)
+ this = yield mergeHomes(t, this, other, homeType)
+ # NOTE: WE MUST NOT TOUCH EITHER HOME OBJECT AFTER THIS POINT.
+ # THE UIDS HAVE CHANGED AND ALL OPERATIONS WILL FAIL.
+
+ end = time.time()
+ elapsed = end - start
+ allElapsed.append(elapsed)
+ log.info(
+ "Scanned UID {uid}; {elapsed} seconds elapsed,"
+ " {fixes} properties fixed ({duplicate} fixes in duplicate).",
+ uid=UID, elapsed=elapsed, fixes=fixedThisHome,
+ duplicate=fixedOtherHome
+ )
+ returnValue(None)
+
+
+
+def _getHome(txn, homeType, uid):
+ """
+ Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
+ isn't I{really} a type of home.
+
+ @param txn: the transaction to retrieve the home from
+ @type txn: L{CommonStoreTransaction}
+
+ @param homeType: L{ENOTIFICATIONTYPE}, L{ECALENDARTYPE}, or
+ L{EADDRESSBOOKTYPE}.
+
+ @param uid: the UID of the home to retrieve.
+ @type uid: L{str}
+
+ @return: a L{Deferred} that fires with the L{CommonHome} or
+ L{NotificationHome} when it has been retrieved.
+ """
+ if homeType == ENOTIFICATIONTYPE:
+ return txn.notificationsWithUID(uid)
+ else:
+ return txn.homeWithUID(homeType, uid)
+
+
+
+@inlineCallbacks
+def _normalizeColumnUUIDs(txn, column):
+ """
+ Upper-case the UUIDs in the given SQL DAL column.
+
+ @param txn: The transaction.
+ @type txn: L{CommonStoreTransaction}
+
+ @param column: the column, which may contain UIDs, to normalize.
+ @type column: L{ColumnSyntax}
+
+ @return: A L{Deferred} that will fire when the UUID normalization of the
+ given column has completed.
+ """
+ tableModel = column.model.table
+ # Get a primary key made of column syntax objects for querying and
+ # comparison later.
+ pkey = [ColumnSyntax(columnModel)
+ for columnModel in tableModel.primaryKey]
+ for row in (yield Select([column] + pkey,
+ From=TableSyntax(tableModel)).on(txn)):
+ before = row[0]
+ pkeyparts = row[1:]
+ after = normalizeUUIDOrNot(before)
+ if after != before:
+ where = _AndNothing
+ # Build a where clause out of the primary key and the parts of the
+ # primary key that were found.
+ for pkeycol, pkeypart in zip(pkeyparts, pkey):
+ where = where.And(pkeycol == pkeypart)
+ yield Update({column: after}, Where=where).on(txn)
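+
+# Illustrative sketch: typical usage against a property-store column, matching the
+# call made by fixUUIDNormalization() further below:
+#
+# yield _normalizeColumnUUIDs(txn, schema.RESOURCE_PROPERTY.VIEWER_UID)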
+
+
+
+class _AndNothing(object):
+ """
+ Simple placeholder for iteratively generating a 'Where' clause; the 'And'
+ just returns its argument, so it can be used at the start of the loop.
+ """
+ @staticmethod
+ def And(self):
+ """
+ Return the argument.
+ """
+ return self
+
+
+
+@inlineCallbacks
+def _needsNormalizationUpgrade(txn):
+ """
+ Determine whether a given store requires a UUID normalization data upgrade.
+
+ @param txn: the transaction to use
+ @type txn: L{CommonStoreTransaction}
+
+ @return: a L{Deferred} that fires with C{True} or C{False} depending on
+ whether we need the normalization upgrade or not.
+ """
+ for x in [schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
+ schema.NOTIFICATION_HOME]:
+ slct = Select([x.OWNER_UID], From=x,
+ Where=x.OWNER_UID != Upper(x.OWNER_UID))
+ rows = yield slct.on(txn)
+ if rows:
+ for [uid] in rows:
+ if normalizeUUIDOrNot(uid) != uid:
+ returnValue(True)
+ returnValue(False)
+
+
+
+@inlineCallbacks
+def fixUUIDNormalization(store):
+ """
+ Fix all UUIDs in the given SQL store to be in a canonical form;
+ 00000000-0000-0000-0000-000000000000 format and upper-case.
+ """
+ t = store.newTransaction(disableCache=True)
+
+ # First, let's see if there are any calendar, addressbook, or notification
+ # homes that have a de-normalized OWNER_UID. If there are none, then we can
+ # early-out and avoid the tedious and potentially expensive inspection of
+ # oodles of calendar data.
+ if not (yield _needsNormalizationUpgrade(t)):
+ log.info("No potentially denormalized UUIDs detected, "
+ "skipping normalization upgrade.")
+ yield t.abort()
+ returnValue(None)
+ try:
+ yield _normalizeHomeUUIDsIn(t, ECALENDARTYPE)
+ yield _normalizeHomeUUIDsIn(t, EADDRESSBOOKTYPE)
+ yield _normalizeHomeUUIDsIn(t, ENOTIFICATIONTYPE)
+ yield _normalizeColumnUUIDs(t, schema.RESOURCE_PROPERTY.VIEWER_UID)
+ yield _normalizeColumnUUIDs(t, schema.APN_SUBSCRIPTIONS.SUBSCRIBER_GUID)
+ except:
+ log.failure("Unable to normalize UUIDs")
+ yield t.abort()
+ # There's a lot of possible problems here which are very hard to test
+ # for individually; unexpected data that might cause constraint
+ # violations under one of the manipulations done by
+ # normalizeHomeUUIDsIn. Since this upgrade does not come along with a
+ # schema version bump and may be re-attempted at any time, just raise
+ # the exception and log it so that we can try again later, and the
+ # service will survive for everyone _not_ affected by this somewhat
+ # obscure bug.
+ else:
+ yield t.commit()
</ins></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoretesttest_sqlpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/test/test_sql.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/test/test_sql.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/test/test_sql.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -13,6 +13,8 @@
</span><span class="cx"> # See the License for the specific language governing permissions and
</span><span class="cx"> # limitations under the License.
</span><span class="cx"> ##
</span><ins>+from txdav.common.datastore.sql_util import _normalizeColumnUUIDs, \
+ fixUUIDNormalization
</ins><span class="cx">
</span><span class="cx"> """
</span><span class="cx"> Tests for L{txdav.common.datastore.sql}.
</span><span class="lines">@@ -31,7 +33,6 @@
</span><span class="cx"> from txdav.common.datastore.sql_tables import schema
</span><span class="cx"> from txdav.common.datastore.test.util import CommonCommonTests
</span><span class="cx"> from txdav.common.icommondatastore import AllRetriesFailed
</span><del>-from txdav.common.datastore.sql import fixUUIDNormalization
</del><span class="cx"> from txdav.xml import element as davxml
</span><span class="cx">
</span><span class="cx"> from uuid import UUID
</span><span class="lines">@@ -371,7 +372,6 @@
</span><span class="cx"> rp.VIEWER_UID: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"}
</span><span class="cx"> ).on(txn)
</span><span class="cx"> # test
</span><del>- from txdav.common.datastore.sql import _normalizeColumnUUIDs
</del><span class="cx"> yield _normalizeColumnUUIDs(txn, rp.VIEWER_UID)
</span><span class="cx"> self.assertEqual(
</span><span class="cx"> map(
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoretestutilpy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/test/util.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/test/util.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/test/util.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -30,7 +30,7 @@
</span><span class="cx">
</span><span class="cx"> from pycalendar.datetime import DateTime
</span><span class="cx">
</span><del>-from random import Random
</del><ins>+from random import Random, randint
</ins><span class="cx">
</span><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> from twext.python.filepath import CachingFilePath as FilePath
</span><span class="lines">@@ -302,7 +302,7 @@
</span><span class="cx"> # later table. Therefore it's OK to drop them in the (reverse) order
</span><span class="cx"> # that they happen to be in.
</span><span class="cx"> tables = [
</span><del>- t.name for t in schema.model.tables
</del><ins>+ t.name for t in schema.model.tables #@UndefinedVariable
</ins><span class="cx"> # All tables with rows _in_ the schema are populated
</span><span class="cx"> # exclusively _by_ the schema and shouldn't be manipulated
</span><span class="cx"> # while the server is running, so we leave those populated.
</span><span class="lines">@@ -314,13 +314,20 @@
</span><span class="cx"> yield cleanupTxn.execSQL("delete from " + table, [])
</span><span class="cx"> except:
</span><span class="cx"> log.failure("delete table {table} failed", table=table)
</span><ins>+
+ # Change the starting values of sequences to random values
+ for sequence in schema.model.sequences: #@UndefinedVariable
+ try:
+ curval = (yield cleanupTxn.execSQL("select nextval('{}')".format(sequence.name), []))[0][0]
+ yield cleanupTxn.execSQL("select setval('{}', {})".format(sequence.name, curval + randint(1, 10000)), [])
+ except:
+ log.failure("setval sequence '{}' failed", sequence=sequence.name)
+ yield cleanupTxn.execSQL("update CALENDARSERVER set VALUE = '1' where NAME = 'MIN-VALID-REVISION'", [])
+
</ins><span class="cx"> yield cleanupTxn.commit()
</span><span class="cx">
</span><span class="cx"> # Deal with memcached items that must be cleared
</span><del>- from txdav.caldav.datastore.sql import CalendarHome
- CalendarHome._cacher.flushAll()
- from txdav.carddav.datastore.sql import AddressBookHome
- AddressBookHome._cacher.flushAll()
</del><ins>+ storeToClean.queryCacher.flushAll()
</ins><span class="cx"> from txdav.base.propertystore.sql import PropertyStore
</span><span class="cx"> PropertyStore._cacher.flushAll()
</span><span class="cx">
</span><span class="lines">@@ -463,7 +470,7 @@
</span><span class="cx"> populateTxn._migrating = True
</span><span class="cx"> for homeUID in requirements:
</span><span class="cx"> calendars = requirements[homeUID]
</span><del>- home = yield populateTxn.calendarHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.calendarHomeWithUID(homeUID, create=True)
</ins><span class="cx"> if calendars is not None:
</span><span class="cx"> # We don't want the default calendar or inbox to appear unless it's
</span><span class="cx"> # explicitly listed.
</span><span class="lines">@@ -558,7 +565,7 @@
</span><span class="cx"> for homeUID in md5s:
</span><span class="cx"> calendars = md5s[homeUID]
</span><span class="cx"> if calendars is not None:
</span><del>- home = yield populateTxn.calendarHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.calendarHomeWithUID(homeUID, create=True)
</ins><span class="cx"> for calendarName in calendars:
</span><span class="cx"> calendarObjNames = calendars[calendarName]
</span><span class="cx"> if calendarObjNames is not None:
</span><span class="lines">@@ -591,7 +598,7 @@
</span><span class="cx"> for homeUID in requirements:
</span><span class="cx"> addressbooks = requirements[homeUID]
</span><span class="cx"> if addressbooks is not None:
</span><del>- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx"> # We don't want the default addressbook
</span><span class="cx"> try:
</span><span class="cx"> yield home.removeAddressBookWithName("addressbook")
</span><span class="lines">@@ -630,7 +637,7 @@
</span><span class="cx"> for homeUID in md5s:
</span><span class="cx"> addressbooks = md5s[homeUID]
</span><span class="cx"> if addressbooks is not None:
</span><del>- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx"> for addressbookName in addressbooks:
</span><span class="cx"> addressbookObjNames = addressbooks[addressbookName]
</span><span class="cx"> if addressbookObjNames is not None:
</span><span class="lines">@@ -922,35 +929,35 @@
</span><span class="cx"> return self.store
</span><span class="cx">
</span><span class="cx">
</span><del>- def homeUnderTest(self, txn=None, name="home1", create=False):
</del><ins>+ def homeUnderTest(self, txn=None, name="home1", status=None, create=False):
</ins><span class="cx"> """
</span><span class="cx"> Get the calendar home detailed by C{requirements['home1']}.
</span><span class="cx"> """
</span><span class="cx"> if txn is None:
</span><span class="cx"> txn = self.transactionUnderTest()
</span><del>- return txn.calendarHomeWithUID(name, create=create)
</del><ins>+ return txn.calendarHomeWithUID(name, status=status, create=create)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><del>- def calendarUnderTest(self, txn=None, name="calendar_1", home="home1"):
</del><ins>+ def calendarUnderTest(self, txn=None, name="calendar_1", home="home1", status=None):
</ins><span class="cx"> """
</span><span class="cx"> Get the calendar detailed by C{requirements['home1']['calendar_1']}.
</span><span class="cx"> """
</span><del>- home = yield self.homeUnderTest(txn, home)
</del><ins>+ home = yield self.homeUnderTest(txn, home, status=status)
</ins><span class="cx"> calendar = yield home.calendarWithName(name)
</span><span class="cx"> returnValue(calendar)
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def calendarObjectUnderTest(
</span><del>- self, txn=None, name="1.ics", calendar_name="calendar_1", home="home1"
</del><ins>+ self, txn=None, name="1.ics", calendar_name="calendar_1", home="home1", status=None
</ins><span class="cx"> ):
</span><span class="cx"> """
</span><span class="cx"> Get the calendar detailed by
</span><span class="cx"> C{requirements[home][calendar_name][name]}.
</span><span class="cx"> """
</span><span class="cx"> calendar = yield self.calendarUnderTest(
</span><del>- txn, name=calendar_name, home=home
</del><ins>+ txn, name=calendar_name, home=home, status=status
</ins><span class="cx"> )
</span><span class="cx"> object = yield calendar.calendarObjectWithName(name)
</span><span class="cx"> returnValue(object)
</span><span class="lines">@@ -992,6 +999,12 @@
</span><span class="cx"> returnValue(object)
</span><span class="cx">
</span><span class="cx">
</span><ins>+ def notificationCollectionUnderTest(self, txn=None, name="home1", status=None, create=False):
+ if txn is None:
+ txn = self.transactionUnderTest()
+ return txn.notificationsWithUID(name, status=status, create=create)
+
+
</ins><span class="cx"> def userRecordWithShortName(self, shortname):
</span><span class="cx"> return self.directory.recordWithShortName(
</span><span class="cx"> self.directory.recordType.user, shortname
</span><span class="lines">@@ -1015,11 +1028,13 @@
</span><span class="cx"> return self.directory.removeRecords([uid])
</span><span class="cx">
</span><span class="cx">
</span><del>- def changeRecord(self, record, fieldname, value):
</del><ins>+ def changeRecord(self, record, fieldname, value, directory=None):
+ if directory is None:
+ directory = self.directory
</ins><span class="cx"> fields = record.fields.copy()
</span><span class="cx"> fields[fieldname] = value
</span><del>- updatedRecord = DirectoryRecord(self.directory, fields)
- return self.directory.updateRecords((updatedRecord,))
</del><ins>+ updatedRecord = DirectoryRecord(directory, fields)
+ return directory.updateRecords((updatedRecord,))
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoreupgradesqlupgradescalendar_upgrade_from_2_to_3py"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -21,9 +21,9 @@
</span><span class="cx"> as in calendar data and properties.
</span><span class="cx"> """
</span><span class="cx">
</span><del>-from txdav.common.datastore.sql import fixUUIDNormalization
</del><span class="cx"> from twisted.internet.defer import inlineCallbacks
</span><span class="cx"> from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion
</span><ins>+from txdav.common.datastore.sql_util import fixUUIDNormalization
</ins><span class="cx">
</span><span class="cx"> UPGRADE_TO_VERSION = 3
</span><span class="cx">
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoreupgradesqlupgradestesttest_notification_upgrade_from_0_to_1py"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/test/test_notification_upgrade_from_0_to_1.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -169,7 +169,7 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> for uid, notificationtype, _ignore_jtype, notificationdata, _ignore_jdata in data:
</span><del>- notifications = yield self.transactionUnderTest().notificationsWithUID("user01")
</del><ins>+ notifications = yield self.transactionUnderTest().notificationsWithUID("user01", create=True)
</ins><span class="cx"> yield notifications.writeNotificationObject(uid, notificationtype, notificationdata)
</span><span class="cx">
</span><span class="cx"> # Force data version to previous
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommondatastoreworktesttest_revision_cleanuppy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/datastore/work/test/test_revision_cleanup.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -55,7 +55,7 @@
</span><span class="cx"> for homeUID in addressookRequirements:
</span><span class="cx"> addressbooks = addressookRequirements[homeUID]
</span><span class="cx"> if addressbooks is not None:
</span><del>- home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
</del><ins>+ home = yield populateTxn.addressbookHomeWithUID(homeUID, create=True)
</ins><span class="cx"> addressbook = home.addressbook()
</span><span class="cx">
</span><span class="cx"> addressbookObjNames = addressbooks[addressbook.name()]
</span></span></pre></div>
<a id="CalendarServertrunktxdavcommonicommondatastorepy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/common/icommondatastore.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/common/icommondatastore.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/common/icommondatastore.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -40,6 +40,12 @@
</span><span class="cx"> "InternalDataStoreError",
</span><span class="cx"> ]
</span><span class="cx">
</span><ins>+# Constants for top-level store types
+ECALENDARTYPE = 0
+EADDRESSBOOKTYPE = 1
+ENOTIFICATIONTYPE = 2
+
+
</ins><span class="cx"> #
</span><span class="cx"> # Exceptions
</span><span class="cx"> #
</span><span class="lines">@@ -347,7 +353,7 @@
</span><span class="cx"> @param token: The device token of the subscriber
</span><span class="cx"> @type token: C{str}
</span><span class="cx">
</span><del>- @return: tuples of (key, timestamp, guid)
</del><ins>+ @return: list of L{Record}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> def apnSubscriptionsByKey(key): #@NoSelf
</span><span class="lines">@@ -357,7 +363,7 @@
</span><span class="cx"> @param key: The push key
</span><span class="cx"> @type key: C{str}
</span><span class="cx">
</span><del>- @return: tuples of (token, guid)
</del><ins>+ @return: list of L{Record}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> def apnSubscriptionsBySubscriber(guid): #@NoSelf
</span><span class="lines">@@ -367,7 +373,7 @@
</span><span class="cx"> @param guid: The GUID of the subscribed principal
</span><span class="cx"> @type guid: C{str}
</span><span class="cx">
</span><del>- @return: tuples of (token, key, timestamp, userAgent, ipAddr)
</del><ins>+ @return: list of L{Record}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> def imipCreateToken(organizer, attendee, icaluid, token=None): #@NoSelf
</span><span class="lines">@@ -389,8 +395,8 @@
</span><span class="cx"> """
</span><span class="cx"> Returns the organizer, attendee, and icaluid corresponding to the token
</span><span class="cx">
</span><del>- @param token: the token to look up
- @type token: C{str}
</del><ins>+ @param token: the token record
+ @type token: L{Record}
</ins><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> def imipGetToken(organizer, attendee, icaluid): #@NoSelf
</span></span></pre></div>
<a id="CalendarServertrunktxdavwhodelegatespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/delegates.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/delegates.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/delegates.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -353,13 +353,8 @@
</span><span class="cx">
</span><span class="cx"> if delegate.recordType == BaseRecordType.group:
</span><span class="cx"> # find the groupID
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = yield txn.groupByUID(
- delegate.uid
- )
- yield txn.addDelegateGroup(delegator.uid, groupID, readWrite)
</del><ins>+ group = yield txn.groupByUID(delegate.uid)
+ yield txn.addDelegateGroup(delegator.uid, group.groupID, readWrite)
</ins><span class="cx"> else:
</span><span class="cx"> yield txn.addDelegate(delegator.uid, delegate.uid, readWrite)
</span><span class="cx">
</span><span class="lines">@@ -393,13 +388,8 @@
</span><span class="cx">
</span><span class="cx"> if delegate.recordType == BaseRecordType.group:
</span><span class="cx"> # find the groupID
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = yield txn.groupByUID(
- delegate.uid
- )
- yield txn.removeDelegateGroup(delegator.uid, groupID, readWrite)
</del><ins>+ group = yield txn.groupByUID(delegate.uid)
+ yield txn.removeDelegateGroup(delegator.uid, group.groupID, readWrite)
</ins><span class="cx"> else:
</span><span class="cx"> yield txn.removeDelegate(delegator.uid, delegate.uid, readWrite)
</span><span class="cx">
</span></span></pre></div>
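<p>Both hunks in delegates.py collapse the old five-element tuple returned by groupByUID into a single group record carrying a groupID attribute. A minimal sketch of the resulting pattern, assuming the transaction and directory records that the surrounding functions already receive (the helper name here is illustrative):</p>
<pre>
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def _addGroupDelegate(txn, delegator, delegate, readWrite):
    # groupByUID now yields one record object; the numeric groupID is an
    # attribute instead of the first element of an unpacked tuple.
    group = yield txn.groupByUID(delegate.uid)
    yield txn.addDelegateGroup(delegator.uid, group.groupID, readWrite)
</pre>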
<a id="CalendarServertrunktxdavwhogroupspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/groups.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/groups.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/groups.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -20,13 +20,15 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> from twext.enterprise.dal.record import fromTable
</span><del>-from twext.enterprise.dal.syntax import Delete, Select, Parameter
</del><ins>+from twext.enterprise.dal.syntax import Select
</ins><span class="cx"> from twext.enterprise.jobqueue import AggregatedWorkItem, RegeneratingWorkItem
</span><span class="cx"> from twext.python.log import Logger
</span><span class="cx"> from twisted.internet.defer import inlineCallbacks, returnValue, succeed, \
</span><span class="cx"> DeferredList
</span><span class="cx"> from twistedcaldav.config import config
</span><span class="cx"> from txdav.caldav.datastore.sql import CalendarStoreFeatures
</span><ins>+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
+from txdav.common.datastore.sql_directory import GroupsRecord
</ins><span class="cx"> from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
</span><span class="cx"> import datetime
</span><span class="cx"> import itertools
</span><span class="lines">@@ -85,7 +87,7 @@
</span><span class="cx">
</span><span class="cx"> class GroupRefreshWork(AggregatedWorkItem, fromTable(schema.GROUP_REFRESH_WORK)):
</span><span class="cx">
</span><del>- group = property(lambda self: (self.table.GROUP_UID == self.groupUid))
</del><ins>+ group = property(lambda self: (self.table.GROUP_UID == self.groupUID))
</ins><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def doWork(self):
</span><span class="lines">@@ -94,27 +96,27 @@
</span><span class="cx">
</span><span class="cx"> try:
</span><span class="cx"> yield groupCacher.refreshGroup(
</span><del>- self.transaction, self.groupUid.decode("utf-8")
</del><ins>+ self.transaction, self.groupUID.decode("utf-8")
</ins><span class="cx"> )
</span><span class="cx"> except Exception, e:
</span><span class="cx"> log.error(
</span><span class="cx"> "Failed to refresh group {group} {err}",
</span><del>- group=self.groupUid, err=e
</del><ins>+ group=self.groupUID, err=e
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> else:
</span><span class="cx"> log.debug(
</span><span class="cx"> "Rescheduling group refresh for {group}: {when}",
</span><del>- group=self.groupUid,
</del><ins>+ group=self.groupUID,
</ins><span class="cx"> when=datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
</span><span class="cx"> )
</span><del>- yield self.reschedule(self.transaction, 10, groupUID=self.groupUid)
</del><ins>+ yield self.reschedule(self.transaction, 10, groupUID=self.groupUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> class GroupDelegateChangesWork(AggregatedWorkItem, fromTable(schema.GROUP_DELEGATE_CHANGES_WORK)):
</span><span class="cx">
</span><del>- delegator = property(lambda self: (self.table.DELEGATOR_UID == self.delegatorUid))
</del><ins>+ delegator = property(lambda self: (self.table.DELEGATOR_UID == self.delegatorUID))
</ins><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def doWork(self):
</span><span class="lines">@@ -124,14 +126,14 @@
</span><span class="cx"> try:
</span><span class="cx"> yield groupCacher.applyExternalAssignments(
</span><span class="cx"> self.transaction,
</span><del>- self.delegatorUid.decode("utf-8"),
- self.readDelegateUid.decode("utf-8"),
- self.writeDelegateUid.decode("utf-8")
</del><ins>+ self.delegatorUID.decode("utf-8"),
+ self.readDelegateUID.decode("utf-8"),
+ self.writeDelegateUID.decode("utf-8")
</ins><span class="cx"> )
</span><span class="cx"> except Exception, e:
</span><span class="cx"> log.error(
</span><span class="cx"> "Failed to apply external delegates for {uid} {err}",
</span><del>- uid=self.delegatorUid, err=e
</del><ins>+ uid=self.delegatorUID, err=e
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -182,8 +184,8 @@
</span><span class="cx"> homeID = rows[0][0]
</span><span class="cx"> home = yield self.transaction.calendarHomeWithResourceID(homeID)
</span><span class="cx"> calendar = yield home.childWithID(self.calendarID)
</span><del>- groupUID = ((yield self.transaction.groupByID(self.groupID)))[0]
- yield calendar.reconcileGroupSharee(groupUID)
</del><ins>+ group = (yield self.transaction.groupByID(self.groupID))
+ yield calendar.reconcileGroupSharee(group.groupUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -268,33 +270,28 @@
</span><span class="cx"> # "Groups to refresh: {g}", g=groupUIDs
</span><span class="cx"> # )
</span><span class="cx">
</span><del>- gr = schema.GROUPS
</del><span class="cx"> if config.AutomaticPurging.Enabled and groupUIDs:
</span><span class="cx"> # remove unused groups and groups that have not been seen in a while
</span><span class="cx"> dateLimit = (
</span><span class="cx"> datetime.datetime.utcnow() -
</span><span class="cx"> datetime.timedelta(seconds=float(config.AutomaticPurging.GroupPurgeIntervalSeconds))
</span><span class="cx"> )
</span><del>- rows = yield Delete(
- From=gr,
- Where=(
- (gr.EXTANT == 0).And(gr.MODIFIED < dateLimit)
</del><ins>+ rows = yield GroupsRecord.deletesome(
+ txn,
+ (
+ (GroupsRecord.extant == 0).And(GroupsRecord.modified < dateLimit)
</ins><span class="cx"> ).Or(
</span><del>- gr.GROUP_UID.NotIn(
- Parameter("groupUIDs", len(groupUIDs))
- )
- ) if groupUIDs else None,
- Return=[gr.GROUP_UID]
- ).on(txn, groupUIDs=groupUIDs)
</del><ins>+ GroupsRecord.groupUID.NotIn(groupUIDs)
+ ),
+ returnCols=GroupsRecord.groupUID,
+ )
</ins><span class="cx"> else:
</span><span class="cx"> # remove unused groups
</span><del>- rows = yield Delete(
- From=gr,
- Where=gr.GROUP_UID.NotIn(
- Parameter("groupUIDs", len(groupUIDs))
- ) if groupUIDs else None,
- Return=[gr.GROUP_UID]
- ).on(txn, groupUIDs=groupUIDs)
</del><ins>+ rows = yield GroupsRecord.deletesome(
+ txn,
+ GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
+ returnCols=GroupsRecord.groupUID,
+ )
</ins><span class="cx"> deletedGroupUIDs = [row[0] for row in rows]
</span><span class="cx"> if deletedGroupUIDs:
</span><span class="cx"> self.log.debug("Deleted old or unused groups {d}", d=deletedGroupUIDs)
</span><span class="lines">@@ -302,7 +299,7 @@
</span><span class="cx"> # For each of those groups, create a per-group refresh work item
</span><span class="cx"> for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
</span><span class="cx"> self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
</span><del>- yield GroupRefreshWork.reschedule(txn, 0, groupUid=groupUID)
</del><ins>+ yield GroupRefreshWork.reschedule(txn, 0, groupUID=groupUID)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -335,9 +332,9 @@
</span><span class="cx"> )
</span><span class="cx"> else:
</span><span class="cx"> yield GroupDelegateChangesWork.reschedule(
</span><del>- txn, 0, delegatorUid=delegatorUID,
- readDelegateUid=readDelegateUID,
- writeDelegateUid=writeDelegateUID
</del><ins>+ txn, 0, delegatorUID=delegatorUID,
+ readDelegateUID=readDelegateUID,
+ writeDelegateUID=writeDelegateUID
</ins><span class="cx"> )
</span><span class="cx"> if removed:
</span><span class="cx"> for delegatorUID in removed:
</span><span class="lines">@@ -351,8 +348,8 @@
</span><span class="cx"> )
</span><span class="cx"> else:
</span><span class="cx"> yield GroupDelegateChangesWork.reschedule(
</span><del>- txn, 0, delegatorUid=delegatorUID,
- readDelegateUid="", writeDelegateUid=""
</del><ins>+ txn, 0, delegatorUID=delegatorUID,
+ readDelegateUID="", writeDelegateUID=""
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx">
</span><span class="lines">@@ -367,26 +364,20 @@
</span><span class="cx"> readDelegateGroupID = writeDelegateGroupID = None
</span><span class="cx">
</span><span class="cx"> if readDelegateUID:
</span><del>- (
- readDelegateGroupID, _ignore_name, _ignore_hash,
- _ignore_modified, _ignore_extant
- ) = (
- yield txn.groupByUID(readDelegateUID)
- )
- if readDelegateGroupID is None:
</del><ins>+ readDelegateGroup = yield txn.groupByUID(readDelegateUID)
+ if readDelegateGroup is None:
</ins><span class="cx"> # The group record does not actually exist
</span><span class="cx"> readDelegateUID = None
</span><ins>+ else:
+ readDelegateGroupID = readDelegateGroup.groupID
</ins><span class="cx">
</span><span class="cx"> if writeDelegateUID:
</span><del>- (
- writeDelegateGroupID, _ignore_name, _ignore_hash,
- _ignore_modified, _ignore_extant
- ) = (
- yield txn.groupByUID(writeDelegateUID)
- )
- if writeDelegateGroupID is None:
</del><ins>+ writeDelegateGroup = yield txn.groupByUID(writeDelegateUID)
+ if writeDelegateGroup is None:
</ins><span class="cx"> # The group record does not actually exist
</span><span class="cx"> writeDelegateUID = None
</span><ins>+ else:
+ writeDelegateGroupID = writeDelegateGroup.groupID
</ins><span class="cx">
</span><span class="cx"> yield txn.assignExternalDelegates(
</span><span class="cx"> delegatorUID, readDelegateGroupID, writeDelegateGroupID,
</span><span class="lines">@@ -411,45 +402,36 @@
</span><span class="cx"> else:
</span><span class="cx"> self.log.debug("Got group record: {u}", u=record.uid)
</span><span class="cx">
</span><del>- (
- groupID, cachedName, cachedMembershipHash, _ignore_modified,
- cachedExtant
- ) = yield txn.groupByUID(
- groupUID,
- create=(record is not None)
- )
</del><ins>+ group = yield txn.groupByUID(groupUID, create=(record is not None))
</ins><span class="cx">
</span><del>- if groupID:
- membershipChanged, addedUIDs, removedUIDs = yield txn.refreshGroup(
- groupUID, record, groupID,
- cachedName, cachedMembershipHash, cachedExtant
- )
</del><ins>+ if group:
+ membershipChanged, addedUIDs, removedUIDs = yield txn.refreshGroup(group, record)
</ins><span class="cx">
</span><span class="cx"> if membershipChanged:
</span><span class="cx"> self.log.info(
</span><span class="cx"> "Membership changed for group {uid} {name}:\n\tadded {added}\n\tremoved {removed}",
</span><del>- uid=groupUID,
- name=cachedName,
</del><ins>+ uid=group.groupUID,
+ name=group.name,
</ins><span class="cx"> added=",".join(addedUIDs),
</span><span class="cx"> removed=",".join(removedUIDs),
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> # Send cache change notifications
</span><span class="cx"> if self.cacheNotifier is not None:
</span><del>- self.cacheNotifier.changed(groupUID)
</del><ins>+ self.cacheNotifier.changed(group.groupUID)
</ins><span class="cx"> for uid in itertools.chain(addedUIDs, removedUIDs):
</span><span class="cx"> self.cacheNotifier.changed(uid)
</span><span class="cx">
</span><span class="cx"> # Notify other store APIs of changes
</span><del>- wpsAttendee = yield self.scheduleGroupAttendeeReconciliations(txn, groupID)
- wpsShareee = yield self.scheduleGroupShareeReconciliations(txn, groupID)
</del><ins>+ wpsAttendee = yield self.scheduleGroupAttendeeReconciliations(txn, group.groupID)
+ wpsShareee = yield self.scheduleGroupShareeReconciliations(txn, group.groupID)
</ins><span class="cx">
</span><span class="cx"> returnValue(wpsAttendee + wpsShareee)
</span><span class="cx"> else:
</span><span class="cx"> self.log.debug(
</span><span class="cx"> "No membership change for group {uid} {name}",
</span><del>- uid=groupUID,
- name=cachedName
</del><ins>+ uid=group.groupUID,
+ name=group.name
</ins><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> returnValue(tuple())
</span><span class="lines">@@ -480,19 +462,15 @@
</span><span class="cx"> work items for them.
</span><span class="cx"> returns: WorkProposal
</span><span class="cx"> """
</span><del>- ga = schema.GROUP_ATTENDEE
- rows = yield Select(
- [ga.RESOURCE_ID, ],
- From=ga,
- Where=ga.GROUP_ID == groupID,
- ).on(txn)
</del><span class="cx">
</span><ins>+ records = yield GroupAttendeeRecord.querysimple(txn, groupID=groupID)
+
</ins><span class="cx"> wps = []
</span><del>- for [eventID] in rows:
</del><ins>+ for record in records:
</ins><span class="cx"> wp = yield GroupAttendeeReconciliationWork.reschedule(
</span><span class="cx"> txn,
</span><span class="cx"> seconds=float(config.GroupAttendees.ReconciliationDelaySeconds),
</span><del>- resourceID=eventID,
</del><ins>+ resourceID=record.resourceID,
</ins><span class="cx"> groupID=groupID,
</span><span class="cx"> )
</span><span class="cx"> wps.append(wp)
</span><span class="lines">@@ -546,20 +524,15 @@
</span><span class="cx"> )
</span><span class="cx">
</span><span class="cx"> # Get groupUIDs for all group attendees
</span><del>- ga = schema.GROUP_ATTENDEE
- gr = schema.GROUPS
- rows = yield Select(
- [gr.GROUP_UID],
- From=gr,
- Where=gr.GROUP_ID.In(
- Select(
- [ga.GROUP_ID],
- From=ga,
- Distinct=True
- )
- )
- ).on(txn)
- attendeeGroupUIDs = frozenset([row[0] for row in rows])
</del><ins>+ groups = yield GroupsRecord.query(
+ txn,
+ GroupsRecord.groupID.In(GroupAttendeeRecord.queryExpr(
+ expr=None,
+ attributes=(GroupAttendeeRecord.groupID,),
+ distinct=True,
+ ))
+ )
+ attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
</ins><span class="cx"> self.log.info(
</span><span class="cx"> "There are {count} group attendees", count=len(attendeeGroupUIDs)
</span><span class="cx"> )
</span></span></pre></div>
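<p>groups.py carries the bulk of this change: hand-built Delete and Select statements give way to Record-class helpers (deletesome, querysimple, query), and the lower-cased groupUid/delegatorUid work-item attributes are renamed to their UID-suffixed forms. A minimal sketch of the record-based query style, assuming a transaction object and the record classes imported in the hunk above; the purge logic is deliberately simplified here:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
from txdav.common.datastore.sql_directory import GroupsRecord

@inlineCallbacks
def staleGroupUIDs(txn, keepUIDs):
    # deletesome() takes a WHERE-style expression built from record attributes
    # and can return selected columns from the deleted rows.
    rows = yield GroupsRecord.deletesome(
        txn,
        GroupsRecord.groupUID.NotIn(keepUIDs) if keepUIDs else None,
        returnCols=GroupsRecord.groupUID,
    )
    returnValue([row[0] for row in rows])

@inlineCallbacks
def attendeeRecordsForGroup(txn, groupID):
    # querysimple() matches columns by keyword instead of raw DAL syntax;
    # each result record exposes resourceID and the other table columns.
    records = yield GroupAttendeeRecord.querysimple(txn, groupID=groupID)
    returnValue(records)
</pre>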
<a id="CalendarServertrunktxdavwhotesttest_delegatespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_delegates.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_delegates.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_delegates.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -19,6 +19,8 @@
</span><span class="cx"> """
</span><span class="cx">
</span><span class="cx"> from txdav.common.datastore.sql import CommonStoreTransaction
</span><ins>+from txdav.common.datastore.sql_directory import DelegateRecord, \
+ DelegateGroupsRecord
</ins><span class="cx"> from txdav.who.delegates import Delegates, RecordType as DelegateRecordType
</span><span class="cx"> from txdav.who.groups import GroupCacher
</span><span class="cx"> from twext.who.idirectory import RecordType
</span><span class="lines">@@ -211,12 +213,9 @@
</span><span class="cx"> yield self.directory.recordWithShortName(RecordType.user, name)
</span><span class="cx"> )
</span><span class="cx"> newSet.add(record.uid)
</span><del>- (
- groupID, name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = (yield txn.groupByUID(group1.uid))
</del><ins>+ group = yield txn.groupByUID(group1.uid)
</ins><span class="cx"> _ignore_added, _ignore_removed = (
</span><del>- yield self.groupCacher.synchronizeMembers(txn, groupID, newSet)
</del><ins>+ yield self.groupCacher.synchronizeMembers(txn, group.groupID, newSet)
</ins><span class="cx"> )
</span><span class="cx"> delegates = (yield Delegates.delegatesOf(txn, delegator, True, expanded=True))
</span><span class="cx"> self.assertEquals(
</span><span class="lines">@@ -261,15 +260,14 @@
</span><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction(label="test_noDuplication")
</span><del>- results = (
- yield txn._selectDelegatesQuery.on(
- txn,
- delegator=delegator.uid.encode("utf-8"),
- readWrite=1
</del><ins>+ results = yield DelegateRecord.query(
+ txn,
+ (DelegateRecord.delegator == delegator.uid.encode("utf-8")).And(
+ DelegateRecord.readWrite == 1
</ins><span class="cx"> )
</span><span class="cx"> )
</span><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals([["__sagen1__"]], map(list, results))
</del><ins>+ self.assertEquals(["__sagen1__", ], [record.delegate for record in results])
</ins><span class="cx">
</span><span class="cx"> # Delegate groups:
</span><span class="cx"> group1 = yield self.directory.recordWithUID(u"__top_group_1__")
</span><span class="lines">@@ -283,15 +281,13 @@
</span><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> txn = self.store.newTransaction(label="test_noDuplication")
</span><del>- results = (
- yield txn._selectDelegateGroupsQuery.on(
- txn,
- delegator=delegator.uid.encode("utf-8"),
- readWrite=1
- )
</del><ins>+ results = yield DelegateGroupsRecord.delegateGroups(
+ txn,
+ delegator.uid,
+ True,
</ins><span class="cx"> )
</span><span class="cx"> yield txn.commit()
</span><del>- self.assertEquals([["__top_group_1__"]], map(list, results))
</del><ins>+ self.assertEquals(["__top_group_1__", ], [record.groupUID for record in results])
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx">
</span></span></pre></div>
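<p>The delegate tests stop poking at the private _selectDelegatesQuery / _selectDelegateGroupsQuery statements and instead read the public record classes. A minimal sketch of the assertion style, assuming a transaction and a delegator UID as in the test above (the helper name is illustrative):</p>
<pre>
from twisted.internet.defer import inlineCallbacks
from txdav.common.datastore.sql_directory import DelegateRecord

@inlineCallbacks
def assertSingleReadWriteDelegate(txn, delegatorUID, expectedDelegateUID):
    # Compose the WHERE clause from record attributes; each result is a
    # record whose delegate attribute holds the delegate's UID.
    results = yield DelegateRecord.query(
        txn,
        (DelegateRecord.delegator == delegatorUID.encode("utf-8")).And(
            DelegateRecord.readWrite == 1
        ),
    )
    assert [record.delegate for record in results] == [expectedDelegateUID]
</pre>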
<a id="CalendarServertrunktxdavwhotesttest_group_attendeespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_group_attendees.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_group_attendees.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_group_attendees.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -18,7 +18,6 @@
</span><span class="cx"> group attendee tests
</span><span class="cx"> """
</span><span class="cx">
</span><del>-from twext.enterprise.dal.syntax import Insert
</del><span class="cx"> from twext.enterprise.jobqueue import JobItem
</span><span class="cx"> from twext.python.filepath import CachingFilePath as FilePath
</span><span class="cx"> from twext.who.directory import DirectoryService
</span><span class="lines">@@ -27,8 +26,8 @@
</span><span class="cx"> from twisted.trial import unittest
</span><span class="cx"> from twistedcaldav.config import config
</span><span class="cx"> from twistedcaldav.ical import Component, normalize_iCalStr
</span><ins>+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
</ins><span class="cx"> from txdav.caldav.datastore.test.util import populateCalendarsFrom, CommonCommonTests
</span><del>-from txdav.common.datastore.sql_tables import schema
</del><span class="cx"> from txdav.who.directory import CalendarDirectoryRecordMixin
</span><span class="cx"> from txdav.who.groups import GroupCacher
</span><span class="cx"> import os
</span><span class="lines">@@ -871,16 +870,13 @@
</span><span class="cx"> # finally, simulate an event that has become old
</span><span class="cx"> self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
</span><span class="cx">
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modDate,
- _ignore_extant
- ) = yield self.transactionUnderTest().groupByUID("group01")
- ga = schema.GROUP_ATTENDEE
- yield Insert({
- ga.RESOURCE_ID: cobj._resourceID,
- ga.GROUP_ID: groupID,
- ga.MEMBERSHIP_HASH: (-1),
- }).on(self.transactionUnderTest())
</del><ins>+ group = yield self.transactionUnderTest().groupByUID("group01")
+ yield GroupAttendeeRecord.create(
+ self.transactionUnderTest(),
+ resourceID=cobj._resourceID,
+ groupID=group.groupID,
+ membershipHash=-1,
+ )
</ins><span class="cx"> wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
</span><span class="cx"> self.assertEqual(len(wps), 1)
</span><span class="cx"> yield self.commit()
</span><span class="lines">@@ -1033,16 +1029,13 @@
</span><span class="cx"> # finally, simulate an event that has become old
</span><span class="cx"> self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
</span><span class="cx">
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modDate,
- _ignore_extant
- ) = yield self.transactionUnderTest().groupByUID("group01")
- ga = schema.GROUP_ATTENDEE
- yield Insert({
- ga.RESOURCE_ID: cobj._resourceID,
- ga.GROUP_ID: groupID,
- ga.MEMBERSHIP_HASH: (-1),
- }).on(self.transactionUnderTest())
</del><ins>+ group = yield self.transactionUnderTest().groupByUID("group01")
+ yield GroupAttendeeRecord.create(
+ self.transactionUnderTest(),
+ resourceID=cobj._resourceID,
+ groupID=group.groupID,
+ membershipHash=-1,
+ )
</ins><span class="cx"> wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
</span><span class="cx"> self.assertEqual(len(wps), 1)
</span><span class="cx"> yield self.commit()
</span></span></pre></div>
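<p>The group-attendee tests swap the raw Insert into GROUP_ATTENDEE for GroupAttendeeRecord.create. A minimal sketch of that call, assuming a transaction, a calendar object exposing _resourceID, and a group record obtained from groupByUID as in the hunks above:</p>
<pre>
from twisted.internet.defer import inlineCallbacks
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord

@inlineCallbacks
def linkGroupToEvent(txn, cobj, group):
    # create() inserts one row, with column values supplied as keyword
    # arguments named after the record attributes.
    yield GroupAttendeeRecord.create(
        txn,
        resourceID=cobj._resourceID,
        groupID=group.groupID,
        membershipHash=-1,
    )
</pre>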
<a id="CalendarServertrunktxdavwhotesttest_group_shareespy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_group_sharees.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_group_sharees.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_group_sharees.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -84,7 +84,7 @@
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="cx"> def _check_notifications(self, uid, items):
</span><del>- notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid)
</del><ins>+ notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid, create=True)
</ins><span class="cx"> notifications = yield notifyHome.listNotificationObjects()
</span><span class="cx"> self.assertEqual(set(notifications), set(items))
</span><span class="cx">
</span></span></pre></div>
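<p>test_group_sharees.py makes the same create-on-demand behaviour explicit for notification collections. A minimal sketch, assuming a transaction as used in the test helper above:</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def notificationUIDs(txn, uid):
    # create=True ensures a notification home exists for the principal before
    # its notification objects are listed.
    notifyHome = yield txn.notificationsWithUID(uid, create=True)
    notifications = yield notifyHome.listNotificationObjects()
    returnValue(set(notifications))
</pre>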
<a id="CalendarServertrunktxdavwhotesttest_groupspy"></a>
<div class="modfile"><h4>Modified: CalendarServer/trunk/txdav/who/test/test_groups.py (14521 => 14522)</h4>
<pre class="diff"><span>
<span class="info">--- CalendarServer/trunk/txdav/who/test/test_groups.py        2015-03-06 21:39:59 UTC (rev 14521)
+++ CalendarServer/trunk/txdav/who/test/test_groups.py        2015-03-06 21:42:49 UTC (rev 14522)
</span><span class="lines">@@ -67,27 +67,24 @@
</span><span class="cx"> record = yield self.directory.recordWithUID(u"__top_group_1__")
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, record.uid)
</span><span class="cx">
</span><del>- (
- groupID, _ignore_name, membershipHash, _ignore_modified,
- extant
- ) = (yield txn.groupByUID(record.uid))
</del><ins>+ group = (yield txn.groupByUID(record.uid))
</ins><span class="cx">
</span><del>- self.assertEquals(extant, True)
- self.assertEquals(membershipHash, "553eb54e3bbb26582198ee04541dbee4")
</del><ins>+ self.assertEquals(group.extant, True)
+ self.assertEquals(group.membershipHash, "553eb54e3bbb26582198ee04541dbee4")
</ins><span class="cx">
</span><del>- groupUID, name, membershipHash, extant = (yield txn.groupByID(groupID))
- self.assertEquals(groupUID, record.uid)
- self.assertEquals(name, u"Top Group 1")
- self.assertEquals(membershipHash, "553eb54e3bbb26582198ee04541dbee4")
- self.assertEquals(extant, True)
</del><ins>+ group = yield txn.groupByID(group.groupID)
+ self.assertEquals(group.groupUID, record.uid)
+ self.assertEquals(group.name, u"Top Group 1")
+ self.assertEquals(group.membershipHash, "553eb54e3bbb26582198ee04541dbee4")
+ self.assertEquals(group.extant, True)
</ins><span class="cx">
</span><del>- members = (yield txn.groupMemberUIDs(groupID))
</del><ins>+ members = (yield txn.groupMemberUIDs(group.groupID))
</ins><span class="cx"> self.assertEquals(
</span><span class="cx"> set([u'__cdaboo1__', u'__glyph1__', u'__sagen1__', u'__wsanchez1__']),
</span><span class="cx"> members
</span><span class="cx"> )
</span><span class="cx">
</span><del>- records = (yield self.groupCacher.cachedMembers(txn, groupID))
</del><ins>+ records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
</ins><span class="cx"> self.assertEquals(
</span><span class="cx"> set([r.uid for r in records]),
</span><span class="cx"> set([u'__cdaboo1__', u'__glyph1__', u'__sagen1__', u'__wsanchez1__'])
</span><span class="lines">@@ -116,10 +113,7 @@
</span><span class="cx"> # Refresh the group so it's assigned a group_id
</span><span class="cx"> uid = u"__top_group_1__"
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, uid)
</span><del>- (
- groupID, name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = yield txn.groupByUID(uid)
</del><ins>+ group = yield txn.groupByUID(uid)
</ins><span class="cx">
</span><span class="cx"> # Remove two members, and add one member
</span><span class="cx"> newSet = set()
</span><span class="lines">@@ -133,12 +127,12 @@
</span><span class="cx"> newSet.add(record.uid)
</span><span class="cx"> added, removed = (
</span><span class="cx"> yield self.groupCacher.synchronizeMembers(
</span><del>- txn, groupID, newSet
</del><ins>+ txn, group.groupID, newSet
</ins><span class="cx"> )
</span><span class="cx"> )
</span><span class="cx"> self.assertEquals(added, set(["__dre1__", ]))
</span><span class="cx"> self.assertEquals(removed, set(["__glyph1__", "__sagen1__", ]))
</span><del>- records = (yield self.groupCacher.cachedMembers(txn, groupID))
</del><ins>+ records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
</ins><span class="cx"> self.assertEquals(
</span><span class="cx"> set([r.shortNames[0] for r in records]),
</span><span class="cx"> set(["wsanchez1", "cdaboo1", "dre1"])
</span><span class="lines">@@ -146,11 +140,11 @@
</span><span class="cx">
</span><span class="cx"> # Remove all members
</span><span class="cx"> added, removed = (
</span><del>- yield self.groupCacher.synchronizeMembers(txn, groupID, set())
</del><ins>+ yield self.groupCacher.synchronizeMembers(txn, group.groupID, set())
</ins><span class="cx"> )
</span><span class="cx"> self.assertEquals(added, set())
</span><span class="cx"> self.assertEquals(removed, set(["__wsanchez1__", "__cdaboo1__", "__dre1__", ]))
</span><del>- records = (yield self.groupCacher.cachedMembers(txn, groupID))
</del><ins>+ records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
</ins><span class="cx"> self.assertEquals(len(records), 0)
</span><span class="cx">
</span><span class="cx"> yield txn.commit()
</span><span class="lines">@@ -168,12 +162,12 @@
</span><span class="cx"> uid = u"__top_group_1__"
</span><span class="cx"> hash = "553eb54e3bbb26582198ee04541dbee4"
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, uid)
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- _ignore_extant
- ) = yield txn.groupByUID(uid)
- results = yield txn.groupByID(groupID)
- self.assertEquals((uid, u"Top Group 1", hash, True), results)
</del><ins>+ group = yield txn.groupByUID(uid)
+ group = yield txn.groupByID(group.groupID)
+ self.assertEqual(group.groupUID, uid)
+ self.assertEqual(group.name, u"Top Group 1")
+ self.assertEqual(group.membershipHash, hash)
+ self.assertEqual(group.extant, True)
</ins><span class="cx">
</span><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="lines">@@ -683,31 +677,25 @@
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, uid)
</span><del>- (
- _ignore_groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- extant
- ) = (yield txn.groupByUID(uid))
</del><ins>+ group = yield txn.groupByUID(uid)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertTrue(extant)
</del><ins>+ self.assertTrue(group.extant)
</ins><span class="cx">
</span><span class="cx"> # Remove the group
</span><span class="cx"> yield self.directory.removeRecords([uid])
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, uid)
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- extant
- ) = (yield txn.groupByUID(uid))
</del><ins>+ group = (yield txn.groupByUID(uid))
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> # Extant = False
</span><del>- self.assertFalse(extant)
</del><ins>+ self.assertFalse(group.extant)
</ins><span class="cx">
</span><span class="cx"> # The list of members stored in the DB for this group is now empty
</span><span class="cx"> txn = store.newTransaction()
</span><del>- members = yield txn.groupMemberUIDs(groupID)
</del><ins>+ members = yield txn.groupMemberUIDs(group.groupID)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> self.assertEquals(members, set())
</span><span class="cx">
</span><span class="lines">@@ -732,18 +720,15 @@
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, uid)
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- extant
- ) = (yield txn.groupByUID(uid))
</del><ins>+ group = (yield txn.groupByUID(uid))
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><span class="cx"> # Extant = True
</span><del>- self.assertTrue(extant)
</del><ins>+ self.assertTrue(group.extant)
</ins><span class="cx">
</span><span class="cx"> # The list of members stored in the DB for this group has 100 users
</span><span class="cx"> txn = store.newTransaction()
</span><del>- members = yield txn.groupMemberUIDs(groupID)
</del><ins>+ members = yield txn.groupMemberUIDs(group.groupID)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> self.assertEquals(len(members), 100 if uid == u"testgroup" else 0)
</span><span class="cx">
</span><span class="lines">@@ -760,27 +745,27 @@
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.refreshGroup(txn, uid)
</span><del>- groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertNotEqual(groupID, None)
</del><ins>+ self.assertNotEqual(group, None)
</ins><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.update(txn)
</span><del>- groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertEqual(groupID, None)
</del><ins>+ self.assertEqual(group, None)
</ins><span class="cx">
</span><span class="cx"> # delegate groups not deleted
</span><span class="cx"> for uid in (u"testgroup", u"emptygroup",):
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><del>- groupID = (yield txn.groupByUID(uid))[0]
- yield txn.addDelegateGroup(delegator=u"sagen", delegateGroupID=groupID, readWrite=True)
</del><ins>+ group = yield txn.groupByUID(uid)
+ yield txn.addDelegateGroup(delegator=u"sagen", delegateGroupID=group.groupID, readWrite=True)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertNotEqual(groupID, None)
</del><ins>+ self.assertNotEqual(group, None)
</ins><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.update(txn)
</span><span class="lines">@@ -788,21 +773,21 @@
</span><span class="cx"> yield JobItem.waitEmpty(store.newTransaction, reactor, 60)
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><del>- groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertNotEqual(groupID, None)
</del><ins>+ self.assertNotEqual(group, None)
</ins><span class="cx">
</span><span class="cx"> # delegate group is deleted. unused group is deleted
</span><span class="cx"> txn = store.newTransaction()
</span><del>- testGroupID = (yield txn.groupByUID(u"testgroup", create=False))[0]
- yield txn.removeDelegateGroup(delegator=u"sagen", delegateGroupID=testGroupID, readWrite=True)
- testGroupID = (yield txn.groupByUID(u"testgroup", create=False))[0]
- emptyGroupID = (yield txn.groupByUID(u"emptygroup", create=False))[0]
</del><ins>+ testGroup = yield txn.groupByUID(u"testgroup", create=False)
+ yield txn.removeDelegateGroup(delegator=u"sagen", delegateGroupID=testGroup.groupID, readWrite=True)
+ testGroup = yield txn.groupByUID(u"testgroup", create=False)
+ emptyGroup = yield txn.groupByUID(u"emptygroup", create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertNotEqual(testGroupID, None)
- self.assertNotEqual(emptyGroupID, None)
</del><ins>+ self.assertNotEqual(testGroup, None)
+ self.assertNotEqual(emptyGroup, None)
</ins><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.update(txn)
</span><span class="lines">@@ -810,12 +795,12 @@
</span><span class="cx"> yield JobItem.waitEmpty(store.newTransaction, reactor, 60)
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><del>- testGroupID = (yield txn.groupByUID(u"testgroup", create=False))[0]
- emptyGroupID = (yield txn.groupByUID(u"emptygroup", create=False))[0]
</del><ins>+ testGroup = yield txn.groupByUID(u"testgroup", create=False)
+ emptyGroup = yield txn.groupByUID(u"emptygroup", create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertEqual(testGroupID, None)
- self.assertNotEqual(emptyGroupID, None)
</del><ins>+ self.assertEqual(testGroup, None)
+ self.assertNotEqual(emptyGroup, None)
</ins><span class="cx">
</span><span class="cx">
</span><span class="cx"> @inlineCallbacks
</span><span class="lines">@@ -831,42 +816,33 @@
</span><span class="cx">
</span><span class="cx"> config.AutomaticPurging.GroupPurgeIntervalSeconds = oldGroupPurgeIntervalSeconds
</span><span class="cx"> txn = store.newTransaction()
</span><del>- groupID = (yield txn.groupByUID(uid))[0]
- yield txn.addDelegateGroup(delegator=u"sagen", delegateGroupID=groupID, readWrite=True)
- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- extant
- ) = yield txn.groupByUID(uid, create=False)
</del><ins>+ group = yield txn.groupByUID(uid)
+ yield txn.addDelegateGroup(delegator=u"sagen", delegateGroupID=group.groupID, readWrite=True)
+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx">
</span><del>- self.assertTrue(extant)
- self.assertNotEqual(groupID, None)
</del><ins>+ self.assertNotEqual(group, None)
+ self.assertTrue(group.extant)
</ins><span class="cx">
</span><span class="cx"> # Remove the group, still cached
</span><span class="cx"> yield self.directory.removeRecords([uid])
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.update(txn)
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- extant
- ) = yield txn.groupByUID(uid, create=False)
</del><ins>+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><span class="cx"> yield JobItem.waitEmpty(store.newTransaction, reactor, 60)
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><del>- (
- groupID, _ignore_name, _ignore_membershipHash, _ignore_modified,
- extant
- ) = yield txn.groupByUID(uid, create=False)
</del><ins>+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertNotEqual(groupID, None)
- self.assertFalse(extant)
</del><ins>+ self.assertNotEqual(group, None)
+ self.assertFalse(group.extant)
</ins><span class="cx">
</span><span class="cx"> # delete the group
</span><span class="cx"> config.AutomaticPurging.GroupPurgeIntervalSeconds = "0.0"
</span><span class="cx">
</span><span class="cx"> txn = store.newTransaction()
</span><span class="cx"> yield self.groupCacher.update(txn)
</span><del>- groupID = (yield txn.groupByUID(uid, create=False))[0]
</del><ins>+ group = yield txn.groupByUID(uid, create=False)
</ins><span class="cx"> yield txn.commit()
</span><del>- self.assertEqual(groupID, None)
</del><ins>+ self.assertEqual(group, None)
</ins></span></pre>
</div>
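<p>The remaining test changes in test_groups.py follow the same pattern: a single group record is fetched (or None when create=False finds nothing) and its attributes are asserted directly instead of indexing into a tuple. A minimal sketch, assuming a transaction and a group UID as in the tests above (the helper name is illustrative):</p>
<pre>
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def groupStillCached(txn, uid):
    # With create=False, groupByUID returns None for an unknown group instead
    # of creating a new row, so a plain None check suffices.
    group = yield txn.groupByUID(uid, create=False)
    if group is None:
        returnValue(False)
    # Attribute access replaces positional tuple unpacking.
    returnValue(group.extant)
</pre>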
</div>
</body>
</html>