[CalendarServer-changes] [14449] CalendarServer/branches/users/cdaboo/pod2pod-migration

source_changes at macosforge.org
Thu Feb 19 13:53:05 PST 2015


Revision: 14449
          http://trac.calendarserver.org//changeset/14449
Author:   cdaboo at apple.com
Date:     2015-02-19 13:53:05 -0800 (Thu, 19 Feb 2015)
Log Message:
-----------
Support group attendee migration. Fix several issues with other metadata needed for syncing. Make more use of DAL Records.
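
For context, the "more use of DAL Records" part of this change follows one pattern
throughout the hunks below: hand-built Select/Insert/Delete statements against raw
schema tables such as GROUP_ATTENDEE are replaced by the record-class helpers
(querysimple, create, deletesome). A minimal before/after sketch of that pattern,
using only calls that appear in this diff (the wrapper functions and variable names
are illustrative, not part of the commit):

    from twext.enterprise.dal.syntax import Select
    from twisted.internet.defer import inlineCallbacks, returnValue
    from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
    from txdav.common.datastore.sql_tables import schema

    @inlineCallbacks
    def groupLinksBefore(txn, resourceID):
        # Old style: build the query by hand against the raw table.
        ga = schema.GROUP_ATTENDEE
        rows = yield Select(
            [ga.GROUP_ID, ga.MEMBERSHIP_HASH],
            From=ga,
            Where=ga.RESOURCE_ID == resourceID,
        ).on(txn)
        returnValue(dict(rows))

    @inlineCallbacks
    def groupLinksAfter(txn, resourceID):
        # New style: the same lookup through the record class added in
        # txdav/caldav/datastore/sql_directory.py by this commit.
        records = yield GroupAttendeeRecord.querysimple(txn, resourceID=resourceID)
        returnValue(dict((record.groupID, record) for record in records))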

Modified Paths:
--------------
    CalendarServer/branches/users/cdaboo/pod2pod-migration/requirements-stable.txt
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_external.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/store_api.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/test/util.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_external.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/current.sql
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/test/util.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/groups.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/test/test_group_attendees.py

Added Paths:
-----------
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_directory.py
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/
    CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/requirements-stable.txt
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/requirements-stable.txt	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/requirements-stable.txt	2015-02-19 21:53:05 UTC (rev 14449)
@@ -36,7 +36,7 @@
             #pyOpenSSL
         pycrypto==2.6.1
 
-    --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/branches/users/cdaboo/pod2pod-migration@14422#egg=twextpy
+    --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/branches/users/cdaboo/pod2pod-migration@14448#egg=twextpy
         cffi==0.8.6
             pycparser==2.10
         #twisted

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -64,6 +64,7 @@
 from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
 from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
 from txdav.caldav.datastore.scheduling.utils import uidFromCalendarUserAddress
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
 from txdav.caldav.datastore.util import AttachmentRetrievalTransport, \
     normalizationLookup
 from txdav.caldav.datastore.util import CalendarObjectBase
@@ -503,40 +504,22 @@
 
 
     @inlineCallbacks
-    def copyMetadata(self, other):
+    def copyMetadata(self, other, calendarIDMap):
         """
         Copy metadata from one L{CalendarObjectResource} to another. This is only
         used during a migration step.
         """
-        assert self._txn._migrating
 
-        # Simple attributes that can be copied over as-is
+        # Simple attributes that can be copied over as-is, but the calendar IDs need to be mapped
         chm = self._homeMetaDataSchema
-        values = {
-            chm.ALARM_VEVENT_TIMED : other._alarm_vevent_timed,
-            chm.ALARM_VEVENT_ALLDAY : other._alarm_vevent_allday,
-            chm.ALARM_VTODO_TIMED : other._alarm_vtodo_timed,
-            chm.ALARM_VTODO_ALLDAY : other._alarm_vtodo_allday,
-            chm.AVAILABILITY : other._availability,
-        }
+        values = {}
+        for attr, col in zip(self.metadataAttributes(), self.metadataColumns()):
+            value = getattr(other, attr)
+            if attr in self._componentDefaultAttribute.values():
+                value = calendarIDMap.get(value)
+            setattr(self, attr, value)
+            values[col] = value
 
-        # Need to map the default collection references from the remote ids to
-        # the local ones using names
-        remote_calendars = yield other.loadChildren()
-        remote_calendars = dict([(calendar.id(), calendar,) for calendar in remote_calendars])
-        local_calendars = yield self.loadChildren()
-        local_calendars = dict([(calendar.name(), calendar,) for calendar in local_calendars])
-
-        for componentType in self._componentDefaultColumn.keys():
-            attr_name = self._componentDefaultAttribute[componentType]
-            remote_id = getattr(other, attr_name)
-            if remote_id is not None:
-                remote_calendar = remote_calendars.get(remote_id)
-                if remote_calendar is not None:
-                    remote_id = local_calendars.get(remote_calendar.name())
-            setattr(self, attr_name, remote_id)
-            values[self._componentDefaultColumn[componentType]] = remote_id
-
         # Update the local data
         yield Update(
             values,
@@ -701,6 +684,27 @@
 
 
     @inlineCallbacks
+    def getAllGroupAttendees(self):
+        """
+        Return a list of (L{GroupAttendeeRecord}, L{GroupsRecord}) tuples, one for each group attendee
+        referenced in calendar data owned by this home.
+        """
+
+        results = []
+        calendars = yield self.loadChildren()
+        for calendar in calendars:
+            if not calendar.owned():
+                continue
+            children = yield calendar.objectResources()
+            cobjs = [child.id() for child in children]
+            if cobjs:
+                result = yield GroupAttendeeRecord.groupAttendeesForObjects(self._txn, cobjs)
+                results.extend(result)
+
+        returnValue(results)
+
+
+    @inlineCallbacks
     def createdHome(self):
 
         # Check whether components type must be separate
@@ -2519,19 +2523,14 @@
         @return: a L{dict} with group ids as the key and membership hash as the value
         @rtype: L{dict}
         """
-        ga = schema.GROUP_ATTENDEE
-        rows = yield Select(
-            [ga.GROUP_ID, ga.MEMBERSHIP_HASH],
-            From=ga,
-            Where=ga.RESOURCE_ID == self._resourceID,
-        ).on(self._txn)
-        returnValue(dict(rows))
+        records = yield GroupAttendeeRecord.querysimple(self._txn, resourceID=self._resourceID)
+        returnValue(dict([(record.groupID, record,) for record in records]))
 
 
     @inlineCallbacks
     def updateEventGroupLink(self, groupCUAToAttendeeMemberPropMap=None):
         """
-        update schema.GROUP_ATTENDEE
+        Update group event links.
         """
         if groupCUAToAttendeeMemberPropMap is None:
             if hasattr(self, "_groupCUAToAttendeeMemberPropMap"):
@@ -2550,37 +2549,25 @@
                 groupUID = uidFromCalendarUserAddress(groupCUA)
             group = yield self._txn.groupByUID(groupUID)
 
-            ga = schema.GROUP_ATTENDEE
             if group.groupID in groupIDToMembershipHashMap:
-                if groupIDToMembershipHashMap[group.groupID] != group.membershipHash:
-                    yield Update(
-                        {ga.MEMBERSHIP_HASH: group.membershipHash, },
-                        Where=(ga.RESOURCE_ID == self._resourceID).And(
-                            ga.GROUP_ID == group.groupID)
-                    ).on(self._txn)
+                if groupIDToMembershipHashMap[group.groupID].membershipHash != group.membershipHash:
+                    yield groupIDToMembershipHashMap[group.groupID].update(membershipHash=group.membershipHash)
                     changed = True
                 del groupIDToMembershipHashMap[group.groupID]
             else:
-                yield Insert({
-                    ga.RESOURCE_ID: self._resourceID,
-                    ga.GROUP_ID: group.groupID,
-                    ga.MEMBERSHIP_HASH: group.membershipHash,
-                }).on(self._txn)
+                yield GroupAttendeeRecord.create(
+                    self._txn,
+                    resourceID=self._resourceID,
+                    groupID=group.groupID,
+                    membershipHash=group.membershipHash,
+                )
                 changed = True
 
         if groupIDToMembershipHashMap:
-            groupIDsToRemove = groupIDToMembershipHashMap.keys()
-            yield Delete(
-                From=ga,
-                Where=(ga.RESOURCE_ID == self._resourceID).And(
-                    ga.GROUP_ID.In(
-                        Parameter(
-                            "groupIDsToRemove",
-                            len(groupIDsToRemove)
-                        )
-                    )
-                )
-            ).on(self._txn, groupIDsToRemove=groupIDsToRemove)
+            yield GroupAttendeeRecord.deletesome(
+                self._txn,
+                GroupAttendeeRecord.groupID.In(groupIDToMembershipHashMap.keys()),
+            )
             changed = True
 
         returnValue(changed)
@@ -2641,11 +2628,7 @@
                     del self._groupCUAToAttendeeMemberPropMap
                 else:
                     # delete existing group rows
-                    ga = schema.GROUP_ATTENDEE
-                    yield Delete(
-                        From=ga,
-                        Where=ga.RESOURCE_ID == self._resourceID,
-                    ).on(txn)
+                    yield GroupAttendeeRecord.deletesimple(self._txn, resourceID=self._resourceID)
 
         returnValue(isOldEventWithGroupAttendees)
 
@@ -2691,13 +2674,11 @@
                     # remove group link to ensure update (update to unknown hash would work too)
                     # FIXME: its possible that more than one group id gets updated during this single work item, so we
                     # need to make sure that ALL the group_id's are removed by this query.
-                    ga = schema.GROUP_ATTENDEE
-                    yield Delete(
-                        From=ga,
-                        Where=(ga.RESOURCE_ID == self._resourceID).And(
-                            ga.GROUP_ID == groupID
-                        )
-                    ).on(self._txn)
+                    yield GroupAttendeeRecord.deletesimple(
+                        self._txn,
+                        resourceID=self._resourceID,
+                        groupID=groupID,
+                    )
 
                     # update group attendee in remaining component
                     component = yield self.componentForUser()
@@ -3807,8 +3788,6 @@
         Copy metadata from one L{CalendarObjectResource} to another. This is only
         used during a migration step.
         """
-        assert self._txn._migrating
-
         co = self._objectSchema
         values = {
             co.ATTACHMENTS_MODE                : other._attachment,

Added: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_directory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_directory.py	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_directory.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -0,0 +1,67 @@
+# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import SerializableRecord, fromTable
+from twext.enterprise.dal.syntax import Select, Parameter
+from twext.python.log import Logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.sql_directory import GroupsRecord
+
+log = Logger()
+
+"""
+Classes and methods that relate to directory objects in the SQL store, e.g.
+group attendees, groups, etc.
+"""
+
+class GroupAttendeeRecord(SerializableRecord, fromTable(schema.GROUP_ATTENDEE)):
+    """
+    @DynamicAttrs
+    L{Record} for L{schema.GROUP_ATTENDEE}.
+    """
+
+    @classmethod
+    @inlineCallbacks
+    def groupAttendeesForObjects(cls, txn, cobjs):
+        """
+        Get (group attendee, group) record pairs for each of the specified calendar objects.
+        """
+
+        # Do a join to get what we need
+        rows = yield Select(
+            list(GroupAttendeeRecord.table) + list(GroupsRecord.table),
+            From=GroupAttendeeRecord.table.join(GroupsRecord.table, GroupAttendeeRecord.groupID == GroupsRecord.groupID),
+            Where=(GroupAttendeeRecord.resourceID.In(Parameter("cobjs", len(cobjs))))
+        ).on(txn, cobjs=cobjs)
+
+        results = []
+        groupAttendeeNames = [GroupAttendeeRecord.__colmap__[column] for column in list(GroupAttendeeRecord.table)]
+        groupsNames = [GroupsRecord.__colmap__[column] for column in list(GroupsRecord.table)]
+        split_point = len(groupAttendeeNames)
+        for row in rows:
+            groupAttendeeRow = row[:split_point]
+            groupAttendeeRecord = GroupAttendeeRecord()
+            groupAttendeeRecord._attributesFromRow(zip(groupAttendeeNames, groupAttendeeRow))
+            groupAttendeeRecord.transaction = txn
+            groupsRow = row[split_point:]
+            groupsRecord = GroupsRecord()
+            groupsRecord._attributesFromRow(zip(groupsNames, groupsRow))
+            groupsRecord.transaction = txn
+            results.append((groupAttendeeRecord, groupsRecord,))
+
+        returnValue(results)
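
The pairs returned by groupAttendeesForObjects() feed the new getAllGroupAttendees()
home methods and, during migration, groupAttendeeReconcile() in home_sync.py. A small
usage sketch of what each pair carries, assuming a transaction and a list of calendar
object resource ids are already at hand (the helper name is illustrative):

    from twisted.internet.defer import inlineCallbacks, returnValue
    from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord

    @inlineCallbacks
    def groupLinksByObject(txn, cobjs):
        # cobjs: calendar object resource ids owned by a single home.
        pairs = yield GroupAttendeeRecord.groupAttendeesForObjects(txn, cobjs)
        summary = {}
        for groupAttendee, group in pairs:
            # groupAttendee.resourceID     - calendar object the group attends
            # groupAttendee.groupID        - pod-local row id of the group
            # groupAttendee.membershipHash - used to detect membership changes
            # group.groupUID               - directory UID, stable across pods
            summary.setdefault(groupAttendee.resourceID, []).append(group.groupUID)
        returnValue(summary)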

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_external.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_external.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/caldav/datastore/sql_external.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ##
+from txdav.common.datastore.sql_directory import GroupsRecord
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
 """
 SQL backend for CalDAV storage when resources are external.
 """
@@ -113,6 +115,17 @@
         raise AssertionError("CommonHomeExternal: not supported")
 
 
+    @inlineCallbacks
+    def getAllGroupAttendees(self):
+        """
+        Return a list of (L{GroupAttendeeRecord}, L{GroupsRecord}) tuples, one for each group attendee
+        referenced in calendar data owned by this home.
+        """
+
+        raw_results = yield self._txn.store().conduit.send_home_get_all_group_attendees(self)
+        returnValue([(GroupAttendeeRecord.deserialize(item[0]), GroupsRecord.deserialize(item[1]),) for item in raw_results])
+
+
     def createdHome(self):
         """
         No children - make this a no-op.

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/home_sync.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -173,18 +173,18 @@
         # TODO: Re-write attachment URIs - not sure if we need this as reverse proxy may take care of it
         pass
 
+        # TODO: group attendee reconcile
+        yield self.groupAttendeeReconcile()
+
+        # TODO: delegates reconcile
+        yield self.delegateReconcile()
+
         # TODO: shared collections reconcile
         pass
 
-        # TODO: group attendee reconcile
-        pass
-
         # TODO: group sharee reconcile
         pass
 
-        # TODO: delegates reconcile
-        yield self.delegateReconcile()
-
         # TODO: notifications
         pass
 
@@ -259,8 +259,13 @@
         """
 
         remote_home = yield self._remoteHome(txn=txn)
+        yield remote_home.readMetaData()
+
+        calendars = yield CalendarMigrationRecord.querysimple(txn, calendarHomeResourceID=self.homeId)
+        calendarIDMap = dict((item.remoteResourceID, item.localResourceID) for item in calendars)
+
         local_home = yield txn.calendarHomeWithUID(self.migratingUid())
-        yield local_home.copyMetadata(remote_home)
+        yield local_home.copyMetadata(remote_home, calendarIDMap)
 
 
     @inlineCallbacks
@@ -832,3 +837,56 @@
         remote_records = yield txn.dumpExternalDelegatesExternal(self.record)
         for record in remote_records:
             yield record.insert(txn)
+
+
+    @inlineCallbacks
+    def groupAttendeeReconcile(self):
+        """
+        Sync the remote group attendee links to the local store.
+        """
+
+        # Get remote data and local mapping information
+        remote_group_attendees, objectIDMap = yield self.groupAttendeeData()
+
+        # Map each result to a local resource (in batches)
+        number_of_links = len(remote_group_attendees)
+        while remote_group_attendees:
+            yield self.groupAttendeeProcess(remote_group_attendees[:50], objectIDMap)
+            remote_group_attendees = remote_group_attendees[50:]
+
+        returnValue(number_of_links)
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def groupAttendeeData(self, txn):
+        """
+        Fetch the remote group attendee links and the remote-to-local calendar object id map.
+        """
+        remote_home = yield self._remoteHome(txn)
+        remote_group_attendees = yield remote_home.getAllGroupAttendees()
+
+        # Get all remote->local object maps
+        records = yield CalendarObjectMigrationRecord.querysimple(
+            txn, calendarHomeResourceID=self.homeId
+        )
+        objectIDMap = dict([(record.remoteResourceID, record.localResourceID) for record in records])
+
+        returnValue((remote_group_attendees, objectIDMap,))
+
+
+    @inTransactionWrapper
+    @inlineCallbacks
+    def groupAttendeeProcess(self, txn, results, objectIDMap):
+        """
+        Map a batch of remote group attendee records onto local resources and insert them.
+        """
+        # Map each result to a local resource
+        for groupAttendee, group in results:
+            local_group = yield txn.groupByUID(group.groupUID)
+            groupAttendee.groupID = local_group.groupID
+            try:
+                groupAttendee.resourceID = objectIDMap[groupAttendee.resourceID]
+            except KeyError:
+                continue
+            yield groupAttendee.insert(txn)
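
Taken together, the reconcile step above is an id translation applied to each
(group attendee, group) pair before it is re-inserted on the local pod: the group is
looked up locally by its pod-independent UID, and the calendar object id is remapped
through the CalendarObjectMigrationRecord table. A sketch of that translation with
made-up ids (the helper name and the example ids are illustrative, not from the tests):

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def relinkGroupAttendee(txn, groupAttendee, group, objectIDMap):
        # objectIDMap maps remote object ids to local ones, e.g. a remote event
        # with id 123 recreated locally as id 456 appears here as {123: 456}.
        local_group = yield txn.groupByUID(group.groupUID)
        groupAttendee.groupID = local_group.groupID
        if groupAttendee.resourceID not in objectIDMap:
            # No local copy of this object exists, so there is nothing to link.
            returnValue(False)
        groupAttendee.resourceID = objectIDMap[groupAttendee.resourceID]
        yield groupAttendee.insert(txn)
        returnValue(True)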

Added: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml	                        (rev 0)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/accounts/groupAccounts.xml	2015-02-19 21:53:05 UTC (rev 14449)
@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Copyright (c) 2006-2015 Apple Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ -->
+
+<!DOCTYPE accounts SYSTEM "accounts.dtd">
+
+<directory realm="Test Realm">
+	<record type="user">
+	    <short-name>user01</short-name>
+	    <uid>user01</uid>
+	    <guid>10000000-0000-0000-0000-000000000001</guid>
+	    <password>user01</password>
+	    <full-name>User 01</full-name>
+	    <email>user01 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user02</short-name>
+	    <uid>user02</uid>
+	    <guid>10000000-0000-0000-0000-000000000002</guid>
+	    <password>user02</password>
+	    <full-name>User 02</full-name>
+	    <email>user02 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user03</short-name>
+	    <uid>user03</uid>
+	    <guid>10000000-0000-0000-0000-000000000003</guid>
+	    <password>user03</password>
+	    <full-name>User 03</full-name>
+	    <email>user03 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user04</short-name>
+	    <uid>user04</uid>
+	    <guid>10000000-0000-0000-0000-000000000004</guid>
+	    <password>user04</password>
+	    <full-name>User 04</full-name>
+	    <email>user04 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user05</short-name>
+	    <uid>user05</uid>
+	    <guid>10000000-0000-0000-0000-000000000005</guid>
+	    <password>user05</password>
+	    <full-name>User 05</full-name>
+	    <email>user05 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user06</short-name>
+	    <uid>user06</uid>
+	    <guid>10000000-0000-0000-0000-000000000006</guid>
+	    <password>user06</password>
+	    <full-name>User 06</full-name>
+	    <email>user06 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user07</short-name>
+	    <uid>user07</uid>
+	    <guid>10000000-0000-0000-0000-000000000007</guid>
+	    <password>user07</password>
+	    <full-name>User 07</full-name>
+	    <email>user07 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user08</short-name>
+	    <uid>user08</uid>
+	    <guid>10000000-0000-0000-0000-000000000008</guid>
+	    <password>user08</password>
+	    <full-name>User 08</full-name>
+	    <email>user08 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user09</short-name>
+	    <uid>user09</uid>
+	    <guid>10000000-0000-0000-0000-000000000009</guid>
+	    <password>user09</password>
+	    <full-name>User 09</full-name>
+	    <email>user09 at example.com</email>
+	</record>
+	<record type="user">
+	    <short-name>user10</short-name>
+	    <uid>user10</uid>
+	    <guid>10000000-0000-0000-0000-000000000010</guid>
+	    <password>user10</password>
+	    <full-name>User 10</full-name>
+	    <email>user10 at example.com</email>
+	</record>
+	<record type="group">
+	    <short-name>group01</short-name>
+	    <uid>group01</uid>
+	    <guid>20000000-0000-0000-0000-000000000001</guid>
+	    <full-name>Group 01</full-name>
+	    <email>group01 at example.com</email>
+	    <member-uid>user01</member-uid>
+	</record>
+	<record type="group">
+	    <short-name>group02</short-name>
+	    <uid>group02</uid>
+	    <guid>20000000-0000-0000-0000-000000000002</guid>
+	    <full-name>Group 02</full-name>
+	    <email>group02 at example.com</email>
+	    <member-uid>user06</member-uid>
+	    <member-uid>user07</member-uid>
+	    <member-uid>user08</member-uid>
+	</record>
+	<record type="group">
+	    <short-name>group03</short-name>
+	    <uid>group03</uid>
+	    <guid>20000000-0000-0000-0000-000000000003</guid>
+	    <full-name>Group 03</full-name>
+	    <email>group03 at example.com</email>
+	    <member-uid>user07</member-uid>
+	    <member-uid>user08</member-uid>
+	    <member-uid>user09</member-uid>
+	</record>
+	<record type="group">
+	    <short-name>group04</short-name>
+	    <uid>group04</uid>
+	    <guid>20000000-0000-0000-0000-000000000004</guid>
+	    <full-name>Group 04</full-name>
+	    <email>group04 at example.com</email>
+	    <member-uid>group02</member-uid>
+	    <member-uid>group03</member-uid>
+	    <member-uid>user10</member-uid>
+	</record>
+</directory>

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/migration/test/test_home_sync.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -16,15 +16,21 @@
 
 from pycalendar.datetime import DateTime
 from twext.enterprise.dal.syntax import Select
-from twisted.internet.defer import inlineCallbacks
+from twext.enterprise.jobqueue import JobItem
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.filepath import FilePath
+from twistedcaldav.config import config
 from twistedcaldav.ical import Component, normalize_iCalStr
 from txdav.caldav.datastore.sql import ManagedAttachment
 from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
-from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord
+from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
+    AttachmentMigrationRecord
 from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
 from txdav.common.datastore.sql_directory import DelegateRecord, \
     ExternalDelegateGroupsRecord, DelegateGroupsRecord
 from txdav.common.datastore.sql_tables import schema
+from txdav.common.datastore.test.util import populateCalendarsFrom
 from txdav.who.delegates import Delegates
 from txweb2.http_headers import MimeType
 from txweb2.stream import MemoryStream
@@ -117,6 +123,7 @@
 END:VCALENDAR
 """.replace("\n", "\r\n").format(**nowYear)
 
+
     @inlineCallbacks
     def test_remote_home(self):
         """
@@ -181,6 +188,115 @@
 
 
     @inlineCallbacks
+    def test_home_metadata(self):
+        """
+        Test that L{syncCalendarHomeMetaData} syncs home metadata correctly.
+        """
+
+        alarm_event_timed = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_event_timed
+TRIGGER:-PT10M
+END:VALARM
+"""
+        alarm_event_allday = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_event_allday
+TRIGGER:-PT10M
+END:VALARM
+"""
+        alarm_todo_timed = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_todo_timed
+TRIGGER:-PT10M
+END:VALARM
+"""
+        alarm_todo_allday = """BEGIN:VALARM
+ACTION:DISPLAY
+DESCRIPTION:alarm_todo_allday
+TRIGGER:-PT10M
+END:VALARM
+"""
+        availability = """BEGIN:VCALENDAR
+VERSION:2.0
+PRODID:-//Example Inc.//Example Calendar//EN
+BEGIN:VAVAILABILITY
+UID:20061005T133225Z-00001-availability at example.com
+DTSTART:20060101T000000Z
+DTEND:20060108T000000Z
+DTSTAMP:20061005T133225Z
+ORGANIZER:mailto:bernard at example.com
+BEGIN:AVAILABLE
+UID:20061005T133225Z-00001-A-availability at example.com
+DTSTART:20060102T090000Z
+DTEND:20060102T120000Z
+DTSTAMP:20061005T133225Z
+RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+SUMMARY:Weekdays from 9:00 to 12:00
+END:AVAILABLE
+END:VAVAILABILITY
+END:VCALENDAR
+"""
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+        events0 = yield home0.createChildWithName("events")
+        yield home0.setDefaultCalendar(events0, "VEVENT")
+        yield home0.setDefaultAlarm(alarm_event_timed, True, True)
+        yield home0.setDefaultAlarm(alarm_event_allday, True, False)
+        yield home0.setDefaultAlarm(alarm_todo_timed, False, True)
+        yield home0.setDefaultAlarm(alarm_todo_allday, False, False)
+        yield home0.setAvailability(Component.fromString(availability))
+        yield self.commitTransaction(0)
+
+        # Trigger sync
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+        yield syncer.sync()
+
+        # Home is present with correct metadata
+        home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=syncer.migratingUid())
+        self.assertTrue(home1 is not None)
+        calendar1 = yield home1.childWithName("calendar")
+        events1 = yield home1.childWithName("events")
+        tasks1 = yield home1.childWithName("tasks")
+        self.assertFalse(home1.isDefaultCalendar(calendar1))
+        self.assertTrue(home1.isDefaultCalendar(events1))
+        self.assertTrue(home1.isDefaultCalendar(tasks1))
+        self.assertEqual(home1.getDefaultAlarm(True, True), alarm_event_timed)
+        self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
+        self.assertEqual(home1.getDefaultAlarm(False, True), alarm_todo_timed)
+        self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
+        self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
+        yield self.commitTransaction(1)
+
+        # Make some changes
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+        calendar0 = yield home0.childWithName("calendar")
+        yield home0.setDefaultCalendar(calendar0, "VEVENT")
+        yield home0.setDefaultAlarm(None, True, True)
+        yield home0.setDefaultAlarm(None, False, True)
+        yield self.commitTransaction(0)
+
+        # Trigger sync again
+        yield syncer.sync()
+
+        # Home is present with correct metadata
+        home1 = yield self.homeUnderTest(self.theTransactionUnderTest(1), name=syncer.migratingUid())
+        self.assertTrue(home1 is not None)
+        calendar1 = yield home1.childWithName("calendar")
+        events1 = yield home1.childWithName("events")
+        tasks1 = yield home1.childWithName("tasks")
+        self.assertTrue(home1.isDefaultCalendar(calendar1))
+        self.assertFalse(home1.isDefaultCalendar(events1))
+        self.assertTrue(home1.isDefaultCalendar(tasks1))
+        self.assertEqual(home1.getDefaultAlarm(True, True), None)
+        self.assertEqual(home1.getDefaultAlarm(True, False), alarm_event_allday)
+        self.assertEqual(home1.getDefaultAlarm(False, True), None)
+        self.assertEqual(home1.getDefaultAlarm(False, False), alarm_todo_allday)
+        self.assertEqual(normalize_iCalStr(home1.getAvailability()), normalize_iCalStr(availability))
+        yield self.commitTransaction(1)
+
+
+    @inlineCallbacks
     def test_get_calendar_sync_list(self):
         """
         Test that L{getCalendarSyncList} returns the correct results.
@@ -475,6 +591,16 @@
         self.assertEqual(len(local_sync_state), 1)
         self.assertEqual(local_sync_state[remote_id].lastSyncToken, remote_sync_state[remote_id].lastSyncToken)
 
+        @inlineCallbacks
+        def _mapLocalIDToRemote(remote_id):
+            records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
+            yield self.commitTransaction(1)
+            for record in records:
+                if record.remoteResourceID == remote_id:
+                    returnValue(record.localResourceID)
+            else:
+                returnValue(None)
+
         # Sync attachments
         changed, removed = yield syncer.syncAttachments()
         self.assertEqual(changed, set())
@@ -515,7 +641,7 @@
 
         # Sync attachments
         changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set((id0_1,)))
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)),)))
         self.assertEqual(removed, set())
 
         # Validate changes
@@ -534,7 +660,7 @@
 
         # Sync attachments
         changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set((id0_2,)))
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_2)),)))
         self.assertEqual(removed, set())
 
         # Validate changes
@@ -555,7 +681,7 @@
 
         # Sync attachments
         changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set((id0_1_changed,)))
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed)),)))
         self.assertEqual(removed, set((id0_1,)))
 
         # Validate changes
@@ -596,7 +722,7 @@
 
         # Sync attachments
         changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set((id0_1_changed_again,)))
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1_changed_again)),)))
         self.assertEqual(removed, set())
 
         # Validate changes
@@ -661,7 +787,18 @@
 
         # Sync attachments
         changed, removed = yield syncer.syncAttachments()
-        self.assertEqual(changed, set((id0_1, id0_2,)))
+
+        @inlineCallbacks
+        def _mapLocalIDToRemote(remote_id):
+            records = yield AttachmentMigrationRecord.all(self.theTransactionUnderTest(1))
+            yield self.commitTransaction(1)
+            for record in records:
+                if record.remoteResourceID == remote_id:
+                    returnValue(record.localResourceID)
+            else:
+                returnValue(None)
+
+        self.assertEqual(changed, set(((yield _mapLocalIDToRemote(id0_1)), (yield _mapLocalIDToRemote(id0_2)),)))
         self.assertEqual(removed, set())
 
         # Link attachments
@@ -787,3 +924,131 @@
         )
 
         yield self.commitTransaction(1)
+
+
+
+class TestGroupAttendeeSync(MultiStoreConduitTest):
+    """
+    GroupAttendeeReconciliation tests
+    """
+
+    now = {"now1": DateTime.getToday().getYear() + 1}
+
+    groupdata1 = """BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 1
+UID:event1 at ninevah.local
+END:VEVENT
+END:VCALENDAR""".format(**now)
+
+    groupdata2 = """BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 2
+UID:event2 at ninevah.local
+ORGANIZER:mailto:user01 at example.com
+ATTENDEE:mailto:user01 at example.com
+ATTENDEE:mailto:group02 at example.com
+END:VEVENT
+END:VCALENDAR""".format(**now)
+
+    groupdata3 = """BEGIN:VCALENDAR
+CALSCALE:GREGORIAN
+PRODID:-//Example Inc.//Example Calendar//EN
+VERSION:2.0
+BEGIN:VEVENT
+DTSTAMP:20051222T205953Z
+CREATED:20060101T150000Z
+DTSTART:{now1:04d}0101T100000Z
+DURATION:PT1H
+SUMMARY:event 3
+UID:event3 at ninevah.local
+ORGANIZER:mailto:user01 at example.com
+ATTENDEE:mailto:user01 at example.com
+ATTENDEE:mailto:group04 at example.com
+END:VEVENT
+END:VCALENDAR""".format(**now)
+
+    @inlineCallbacks
+    def setUp(self):
+        self.accounts = FilePath(__file__).sibling("accounts").child("groupAccounts.xml")
+        yield super(TestGroupAttendeeSync, self).setUp()
+        yield self.populate()
+
+
+    def configure(self):
+        super(TestGroupAttendeeSync, self).configure()
+        config.GroupAttendees.Enabled = True
+        config.GroupAttendees.ReconciliationDelaySeconds = 0
+        config.GroupAttendees.AutoUpdateSecondsFromNow = 0
+
+
+    @inlineCallbacks
+    def populate(self):
+        yield populateCalendarsFrom(self.requirements, self.theStoreUnderTest(0))
+
+    requirements = {
+        "user01" : None,
+        "user02" : None,
+        "user06" : None,
+        "user07" : None,
+        "user08" : None,
+        "user09" : None,
+        "user10" : None,
+
+    }
+
+    @inlineCallbacks
+    def test_group_attendees(self):
+        """
+        Test that L{groupAttendeeReconcile} links groups to the associated calendar objects.
+        """
+
+        home0 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+        calendar0 = yield home0.childWithName("calendar")
+        yield calendar0.createCalendarObjectWithName("1.ics", Component.fromString(self.groupdata1))
+        yield calendar0.createCalendarObjectWithName("2.ics", Component.fromString(self.groupdata2))
+        yield calendar0.createCalendarObjectWithName("3.ics", Component.fromString(self.groupdata3))
+        yield self.commitTransaction(0)
+
+        yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60.0)
+
+        # Trigger sync
+        syncer = CrossPodHomeSync(self.theStoreUnderTest(1), "user01")
+        yield syncer.sync()
+
+        # Link groups
+        len_links = yield syncer.groupAttendeeReconcile()
+        self.assertEqual(len_links, 2)
+
+        # Local calendar exists
+        home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name=syncer.migratingUid())
+        calendar1 = yield home1.childWithName("calendar")
+        self.assertTrue(calendar1 is not None)
+        children = yield calendar1.objectResources()
+        self.assertEqual(set([child.name() for child in children]), set(("1.ics", "2.ics", "3.ics",)))
+
+        object2 = yield calendar1.objectResourceWithName("2.ics")
+        record = (yield object2.groupEventLinks()).values()[0]
+        group02 = yield self.theTransactionUnderTest(1).groupByUID(u"group02")
+        self.assertEqual(record.groupID, group02.groupID)
+        self.assertEqual(record.membershipHash, group02.membershipHash)
+
+        object3 = yield calendar1.objectResourceWithName("3.ics")
+        record = (yield object3.groupEventLinks()).values()[0]
+        group04 = yield self.theTransactionUnderTest(1).groupByUID(u"group04")
+        self.assertEqual(record.groupID, group04.groupID)
+        self.assertEqual(record.membershipHash, group04.membershipHash)

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/store_api.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/store_api.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/store_api.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -127,9 +127,19 @@
             "matchtotal": matchtotal,
         })
 
+
+    @staticmethod
+    def _to_serialize_pair_list(value):
+        """
+        Convert the value to the external (JSON-based) representation.
+        """
+        return [[a.serialize(), b.serialize(), ] for a, b in value]
+
 # These are the actions on store objects we need to expose via the conduit api
 
 # Calls on L{CommonHome} objects
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "home_metadata", "serialize", classMethod=False)
+UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "home_get_all_group_attendees", "getAllGroupAttendees", classMethod=False, transform_recv_result=StoreAPIConduitMixin._to_serialize_pair_list)
 
 # Calls on L{CommonHomeChild} objects
 UtilityConduitMixin._make_simple_action(StoreAPIConduitMixin, "homechild_listobjects", "listObjects", classMethod=True)
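
For reference, the new home_get_all_group_attendees action sends each
(GroupAttendeeRecord, GroupsRecord) pair across the conduit as a pair of plain
dictionaries, which the calling pod turns back into detached records (see
CalendarHomeExternal.getAllGroupAttendees() in txdav/caldav/datastore/sql_external.py
earlier in this diff). A minimal sketch of both halves, reduced to the calls used in
this change (function names are illustrative):

    from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
    from txdav.common.datastore.sql_directory import GroupsRecord

    def pairsToWire(pairs):
        # Sending pod: each record pair becomes a pair of plain dicts suitable
        # for the JSON-based conduit payload.
        return [[a.serialize(), b.serialize()] for a, b in pairs]

    def pairsFromWire(raw_results):
        # Receiving pod: rebuild record instances from those dicts.
        return [
            (GroupAttendeeRecord.deserialize(item[0]), GroupsRecord.deserialize(item[1]))
            for item in raw_results
        ]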

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/test/util.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/podding/test/util.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -128,6 +128,7 @@
     theStoreBuilders = []
     theStores = []
     activeTransactions = []
+    accounts = None
 
     def __init__(self, methodName='runTest'):
         txweb2.dav.test.util.TestCase.__init__(self, methodName)
@@ -153,13 +154,17 @@
             if i == 0:
                 yield self.buildStoreAndDirectory(
                     serversDB=serversDB,
-                    storeBuilder=self.theStoreBuilders[i]
+                    storeBuilder=self.theStoreBuilders[i],
+                    accounts=self.accounts,
                 )
                 self.theStores[i] = self.store
             else:
                 self.theStores[i] = yield self.buildStore(self.theStoreBuilders[i])
                 directory = buildTestDirectory(
-                    self.theStores[i], self.mktemp(), serversDB=serversDB
+                    self.theStores[i],
+                    self.mktemp(),
+                    serversDB=serversDB,
+                    accounts=self.accounts,
                 )
                 self.theStores[i].setDirectoryService(directory)
 

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -2191,6 +2191,24 @@
             setattr(self, attr, value)
 
 
+    def serialize(self):
+        """
+        Create a dictionary of metadata attribute names and values so this object can be sent over a cross-pod call
+        and reconstituted at the other end. Note that the other end may have a different schema so
+        the attributes may not match exactly and will need to be processed accordingly.
+        """
+        return dict([(attr[1:], getattr(self, attr, None)) for attr in self.metadataAttributes()])
+
+
+    def deserialize(self, mapping):
+        """
+        Given a mapping generated by L{serialize}, convert the values to attributes on this object.
+        """
+
+        for attr in self.metadataAttributes():
+            setattr(self, attr, mapping.get(attr[1:]))
+
+
     @classmethod
     @inlineCallbacks
     def listHomes(cls, txn):
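
The serialize()/deserialize() pair added above is what backs readMetaData() on the
external home class (next file in this diff): the remote pod answers the
"home_metadata" conduit action registered in store_api.py with serialize()'s mapping,
and the local stub applies it with deserialize(). A short sketch of the calling side
(the function name is illustrative; the conduit call is the one used by readMetaData):

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def fetchRemoteHomeMetadata(external_home):
        # serialize() on the remote pod strips the leading underscore from each
        # metadata attribute (e.g. _availability -> "availability"); deserialize()
        # re-applies the values to the local external-home stub.
        mapping = yield external_home._txn.store().conduit.send_home_metadata(external_home)
        external_home.deserialize(mapping)
        returnValue(mapping)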

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_external.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_external.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_external.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -53,6 +53,15 @@
         raise AssertionError("CommonHomeExternal: not supported")
 
 
+    @inlineCallbacks
+    def readMetaData(self):
+        """
+        Read the home metadata from the remote home and save it as attributes on this object.
+        """
+        mapping = yield self._txn.store().conduit.send_home_metadata(self)
+        self.deserialize(mapping)
+
+
     def external(self):
         """
         Is this an external home.

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/current.sql	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/sql_schema/current.sql	2015-02-19 21:53:05 UTC (rev 14449)
@@ -927,7 +927,7 @@
   DELEGATOR                     varchar(255) not null,
   GROUP_ID                      integer      not null references GROUPS on delete cascade,
   READ_WRITE                    integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
-  IS_EXTERNAL                   integer      not null, -- 1 = ReadWrite, 0 = ReadOnly
+  IS_EXTERNAL                   integer      not null, -- 1 = External, 0 = Internal
 
   primary key (DELEGATOR, READ_WRITE, GROUP_ID)
 );

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/test/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/test/util.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/common/datastore/test/util.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -30,7 +30,7 @@
 
 from pycalendar.datetime import DateTime
 
-from random import Random
+from random import Random, randint
 
 from twext.python.log import Logger
 from twext.python.filepath import CachingFilePath as FilePath
@@ -290,6 +290,16 @@
                 yield cleanupTxn.execSQL("delete from " + table, [])
             except:
                 log.failure("delete table {table} failed", table=table)
+
+        # Change the starting values of sequences to random values
+        for sequence in schema.model.sequences: #@UndefinedVariable
+            try:
+                curval = (yield cleanupTxn.execSQL("select nextval('{}')".format(sequence.name), []))[0][0]
+                yield cleanupTxn.execSQL("select setval('{}', {})".format(sequence.name, curval + randint(1, 10000)), [])
+            except:
+                log.failure("setval sequence '{sequence}' failed", sequence=sequence.name)
+        yield cleanupTxn.execSQL("update CALENDARSERVER set VALUE = '1' where NAME = 'MIN-VALID-REVISION'", [])
+
         yield cleanupTxn.commit()
 
         # Deal with memcached items that must be cleared

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/groups.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/groups.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/groups.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -20,13 +20,15 @@
 """
 
 from twext.enterprise.dal.record import fromTable
-from twext.enterprise.dal.syntax import Delete, Select, Parameter
+from twext.enterprise.dal.syntax import Select
 from twext.enterprise.jobqueue import AggregatedWorkItem, RegeneratingWorkItem
 from twext.python.log import Logger
 from twisted.internet.defer import inlineCallbacks, returnValue, succeed, \
     DeferredList
 from twistedcaldav.config import config
 from txdav.caldav.datastore.sql import CalendarStoreFeatures
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
+from txdav.common.datastore.sql_directory import GroupsRecord
 from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
 import datetime
 import itertools
@@ -268,33 +270,28 @@
         #     "Groups to refresh: {g}", g=groupUIDs
         # )
 
-        gr = schema.GROUPS
         if config.AutomaticPurging.Enabled and groupUIDs:
             # remove unused groups and groups that have not been seen in a while
             dateLimit = (
                 datetime.datetime.utcnow() -
                 datetime.timedelta(seconds=float(config.AutomaticPurging.GroupPurgeIntervalSeconds))
             )
-            rows = yield Delete(
-                From=gr,
-                Where=(
-                    (gr.EXTANT == 0).And(gr.MODIFIED < dateLimit)
+            rows = yield GroupsRecord.deletesome(
+                txn,
+                (
+                    (GroupsRecord.extant == 0).And(GroupsRecord.modified < dateLimit)
                 ).Or(
-                    gr.GROUP_UID.NotIn(
-                        Parameter("groupUIDs", len(groupUIDs))
-                    )
-                ) if groupUIDs else None,
-                Return=[gr.GROUP_UID]
-            ).on(txn, groupUIDs=groupUIDs)
+                    GroupsRecord.groupUID.NotIn(groupUIDs)
+                ),
+                returnCols=GroupsRecord.groupUID,
+            )
         else:
             # remove unused groups
-            rows = yield Delete(
-                From=gr,
-                Where=gr.GROUP_UID.NotIn(
-                    Parameter("groupUIDs", len(groupUIDs))
-                ) if groupUIDs else None,
-                Return=[gr.GROUP_UID]
-            ).on(txn, groupUIDs=groupUIDs)
+            rows = yield GroupsRecord.deletesome(
+                txn,
+                GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
+                returnCols=GroupsRecord.groupUID,
+            )
         deletedGroupUIDs = [row[0] for row in rows]
         if deletedGroupUIDs:
             self.log.debug("Deleted old or unused groups {d}", d=deletedGroupUIDs)
@@ -465,19 +462,15 @@
         work items for them.
         returns: WorkProposal
         """
-        ga = schema.GROUP_ATTENDEE
-        rows = yield Select(
-            [ga.RESOURCE_ID, ],
-            From=ga,
-            Where=ga.GROUP_ID == groupID,
-        ).on(txn)
 
+        records = yield GroupAttendeeRecord.querysimple(txn, groupID=groupID)
+
         wps = []
-        for [eventID] in rows:
+        for record in records:
             wp = yield GroupAttendeeReconciliationWork.reschedule(
                 txn,
                 seconds=float(config.GroupAttendees.ReconciliationDelaySeconds),
-                resourceID=eventID,
+                resourceID=record.resourceID,
                 groupID=groupID,
             )
             wps.append(wp)
@@ -531,20 +524,15 @@
             )
 
         # Get groupUIDs for all group attendees
-        ga = schema.GROUP_ATTENDEE
-        gr = schema.GROUPS
-        rows = yield Select(
-            [gr.GROUP_UID],
-            From=gr,
-            Where=gr.GROUP_ID.In(
-                Select(
-                    [ga.GROUP_ID],
-                    From=ga,
-                    Distinct=True
-                )
-            )
-        ).on(txn)
-        attendeeGroupUIDs = frozenset([row[0] for row in rows])
+        groups = yield GroupsRecord.query(
+            txn,
+            GroupsRecord.groupID.In(GroupAttendeeRecord.queryExpr(
+                expr=None,
+                attributes=(GroupAttendeeRecord.groupID,),
+                distinct=True,
+            ))
+        )
+        attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
         self.log.info(
             "There are {count} group attendees", count=len(attendeeGroupUIDs)
         )
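
One detail of the groups.py changes worth calling out: deletesome() can hand back
columns from the deleted rows, so the old Return=[gr.GROUP_UID] argument to Delete
maps onto returnCols=. A minimal sketch of that shape, using only calls that appear
in this hunk (the function name is illustrative):

    from twisted.internet.defer import inlineCallbacks, returnValue
    from txdav.common.datastore.sql_directory import GroupsRecord

    @inlineCallbacks
    def purgeUnreferencedGroups(txn, groupUIDs):
        # Delete every GROUPS row whose UID is not in groupUIDs and return the
        # UIDs that were actually removed.
        rows = yield GroupsRecord.deletesome(
            txn,
            GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
            returnCols=GroupsRecord.groupUID,
        )
        returnValue([row[0] for row in rows])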

Modified: CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/test/test_group_attendees.py
===================================================================
--- CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/test/test_group_attendees.py	2015-02-19 21:40:25 UTC (rev 14448)
+++ CalendarServer/branches/users/cdaboo/pod2pod-migration/txdav/who/test/test_group_attendees.py	2015-02-19 21:53:05 UTC (rev 14449)
@@ -18,7 +18,6 @@
     group attendee tests
 """
 
-from twext.enterprise.dal.syntax import Insert
 from twext.enterprise.jobqueue import JobItem
 from twext.python.filepath import CachingFilePath as FilePath
 from twext.who.directory import DirectoryService
@@ -27,8 +26,8 @@
 from twisted.trial import unittest
 from twistedcaldav.config import config
 from twistedcaldav.ical import Component, normalize_iCalStr
+from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
 from txdav.caldav.datastore.test.util import populateCalendarsFrom, CommonCommonTests
-from txdav.common.datastore.sql_tables import schema
 from txdav.who.directory import CalendarDirectoryRecordMixin
 from txdav.who.groups import GroupCacher
 import os
@@ -872,12 +871,12 @@
         self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
 
         group = yield self.transactionUnderTest().groupByUID("group01")
-        ga = schema.GROUP_ATTENDEE
-        yield Insert({
-            ga.RESOURCE_ID: cobj._resourceID,
-            ga.GROUP_ID: group.groupID,
-            ga.MEMBERSHIP_HASH: (-1),
-        }).on(self.transactionUnderTest())
+        yield GroupAttendeeRecord.create(
+            self.transactionUnderTest(),
+            resourceID=cobj._resourceID,
+            groupID=group.groupID,
+            membershipHash=-1,
+        )
         wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
         self.assertEqual(len(wps), 1)
         yield self.commit()
@@ -1031,12 +1030,12 @@
         self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
 
         group = yield self.transactionUnderTest().groupByUID("group01")
-        ga = schema.GROUP_ATTENDEE
-        yield Insert({
-            ga.RESOURCE_ID: cobj._resourceID,
-            ga.GROUP_ID: group.groupID,
-            ga.MEMBERSHIP_HASH: (-1),
-        }).on(self.transactionUnderTest())
+        yield GroupAttendeeRecord.create(
+            self.transactionUnderTest(),
+            resourceID=cobj._resourceID,
+            groupID=group.groupID,
+            membershipHash=-1,
+        )
         wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
         self.assertEqual(len(wps), 1)
         yield self.commit()