[CalendarServer-changes] [14783] CalendarServer/trunk
source_changes at macosforge.org
source_changes at macosforge.org
Wed May 13 11:18:41 PDT 2015
Revision: 14783
http://trac.calendarserver.org/changeset/14783
Author: cdaboo at apple.com
Date: 2015-05-13 11:18:41 -0700 (Wed, 13 May 2015)
Log Message:
-----------
Implement final pod migration clean-up steps.
Modified Paths:
--------------
CalendarServer/trunk/requirements-stable.txt
CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py
CalendarServer/trunk/txdav/caldav/datastore/sql.py
CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py
CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py
CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py
CalendarServer/trunk/txdav/common/datastore/podding/store_api.py
CalendarServer/trunk/txdav/common/datastore/sql.py
CalendarServer/trunk/txdav/common/datastore/sql_notification.py
CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql
CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_54_to_55.sql
CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_54_to_55.sql
CalendarServer/trunk/txdav/common/datastore/sql_sharing.py
Added Paths:
-----------
CalendarServer/trunk/txdav/common/datastore/podding/migration/work.py
Modified: CalendarServer/trunk/requirements-stable.txt
===================================================================
--- CalendarServer/trunk/requirements-stable.txt 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/requirements-stable.txt 2015-05-13 18:18:41 UTC (rev 14783)
@@ -36,7 +36,7 @@
#pyOpenSSL
pycrypto==2.6.1
- --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/trunk@14775#egg=twextpy
+ --editable svn+http://svn.calendarserver.org/repository/calendarserver/twext/trunk@14782#egg=twextpy
cffi==0.8.6
pycparser==2.10
#twisted
Modified: CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/caldav/datastore/scheduling/work.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -1108,5 +1108,6 @@
log.debug("ImplicitProcessing - skipping auto-reply of missing ID: '{rid}'", rid=self.resourceID)
-for workClass in (ScheduleOrganizerWork, ScheduleOrganizerSendWork, ScheduleReplyWork, ScheduleRefreshWork, ScheduleAutoReplyWork,):
+allScheduleWork = (ScheduleOrganizerWork, ScheduleOrganizerSendWork, ScheduleReplyWork, ScheduleRefreshWork, ScheduleAutoReplyWork,)
+for workClass in allScheduleWork:
ScheduleWork._classForWorkType[workClass.__name__] = workClass
Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/sql.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -14,9 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
##
-from txdav.caldav.datastore.scheduling.work import ScheduleOrganizerWork, \
- ScheduleWork, ScheduleAutoReplyWork, ScheduleOrganizerSendWork, \
- ScheduleRefreshWork, ScheduleReplyWork
"""
@@ -65,6 +62,7 @@
from txdav.caldav.datastore.scheduling.imip.token import iMIPTokenRecord
from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
from txdav.caldav.datastore.scheduling.utils import uidFromCalendarUserAddress
+from txdav.caldav.datastore.scheduling.work import allScheduleWork, ScheduleWork
from txdav.caldav.datastore.sql_attachment import Attachment, DropBoxAttachment, \
AttachmentLink, ManagedAttachment
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord, \
@@ -538,6 +536,19 @@
@inlineCallbacks
+ def purgeAll(self):
+ """
+ Do a complete purge of all data associated with this calendar home. For now this will assume
+ a "silent" non-implicit behavior. In the future we will want to build in some of the options
+ the current set of "purge" CLI tools have to allow for cancels of future events etc.
+ """
+ # delete attachments corresponding to this home, also removing from disk
+ yield Attachment.removedHome(self._txn, self._resourceID)
+
+ yield super(CalendarHome, self).purgeAll()
+
+
+ @inlineCallbacks
def copyMetadata(self, other, calendarIDMap):
"""
Copy metadata from one L{CalendarObjectResource} to another. This is only
@@ -1065,7 +1076,7 @@
L{ScheduleOrganizerSendWork}, L{ScheduleReplyWork}, L{ScheduleRefreshWork}, L{ScheduleAutoReplyWork}
"""
- for workType in (ScheduleOrganizerWork, ScheduleOrganizerSendWork, ScheduleReplyWork, ScheduleRefreshWork, ScheduleAutoReplyWork,):
+ for workType in allScheduleWork:
yield JobItem.updatesome(
self._txn,
where=JobItem.jobID.In(
@@ -1083,7 +1094,7 @@
"""
results = collections.defaultdict(list)
- for workType in (ScheduleOrganizerWork, ScheduleOrganizerSendWork, ScheduleReplyWork, ScheduleRefreshWork, ScheduleAutoReplyWork,):
+ for workType in allScheduleWork:
workItems = yield workType.query(self._txn, workType.homeResourceID == self.id())
for item in workItems:
serialized = yield item.serializeWithAncillaryData()
Modified: CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/home_sync.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -14,23 +14,26 @@
# limitations under the License.
##
-from functools import wraps
+from twext.python.log import Logger
-from twext.python.log import Logger
from twisted.internet.defer import returnValue, inlineCallbacks
from twisted.python.failure import Failure
+
from twistedcaldav.accounting import emitAccounting
+
+from txdav.caldav.datastore.sql import ManagedAttachment, CalendarBindRecord
from txdav.caldav.icalendarstore import ComponentUpdateState
from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
CalendarObjectMigrationRecord, AttachmentMigrationRecord, \
MigrationCleanupWork
-from txdav.caldav.datastore.sql import ManagedAttachment, CalendarBindRecord
+from txdav.common.datastore.podding.migration.work import HomeCleanupWork
from txdav.common.datastore.sql_external import NotificationCollectionExternal
from txdav.common.datastore.sql_notification import NotificationCollection
from txdav.common.datastore.sql_tables import _HOME_STATUS_MIGRATING, _HOME_STATUS_DISABLED, \
_HOME_STATUS_EXTERNAL, _HOME_STATUS_NORMAL
from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+from functools import wraps
from uuid import uuid4
import datetime
@@ -287,8 +290,12 @@
homeResourceID=newhome.id(),
)
- # TODO: purge the old ones
- pass
+ # Purge the old ones
+ yield HomeCleanupWork.reschedule(
+ txn,
+ HomeCleanupWork.notBeforeDelay,
+ ownerUID=newhome.uid(),
+ )
self.accounting("Completed: enableLocalHome.\n")
@@ -305,10 +312,19 @@
yield self.loadRecord()
self.accounting("Starting: removeRemoteHome...")
yield self.prepareCalendarHome()
+ yield self._migratedHome()
self.accounting("Completed: removeRemoteHome.\n")
+ @inTransactionWrapper
+ def _migratedHome(self, txn):
+ """
+ Send cross-pod message to tell the old pod to remove the migrated data.
+ """
+ return txn.store().conduit.send_migrated_home(txn, self.diruid)
+
+
@inlineCallbacks
def loadRecord(self):
"""
Modified: CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/sync_metadata.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -16,7 +16,7 @@
from twext.enterprise.dal.record import Record, fromTable
from twext.enterprise.dal.syntax import Parameter, Delete
-from twext.enterprise.jobqueue import SingletonWorkItem
+from twext.enterprise.jobqueue import WorkItem
from twisted.internet.defer import inlineCallbacks
from txdav.common.datastore.sql_tables import schema
@@ -60,7 +60,7 @@
-class MigrationCleanupWork(SingletonWorkItem, fromTable(schema.MIGRATION_CLEANUP_WORK)):
+class MigrationCleanupWork(WorkItem, fromTable(schema.MIGRATION_CLEANUP_WORK)):
group = "homeResourceID"
Modified: CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/test/test_migration.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -27,12 +27,16 @@
from txdav.caldav.datastore.scheduling.ischedule.delivery import IScheduleRequest
from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
+from txdav.caldav.datastore.scheduling.work import allScheduleWork
from txdav.caldav.datastore.test.common import CaptureProtocol
from txdav.common.datastore.podding.migration.home_sync import CrossPodHomeSync
from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
AttachmentMigrationRecord, CalendarObjectMigrationRecord, \
MigrationCleanupWork
+from txdav.common.datastore.podding.migration.work import HomeCleanupWork, MigratedHomeCleanupWork
from txdav.common.datastore.podding.test.util import MultiStoreConduitTest
+from txdav.common.datastore.sql_directory import DelegateRecord,\
+ DelegateGroupsRecord, ExternalDelegateGroupsRecord
from txdav.common.datastore.sql_tables import _BIND_MODE_READ, \
_HOME_STATUS_DISABLED, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL, \
_HOME_STATUS_MIGRATING
@@ -78,6 +82,8 @@
# Speed up work
self.patch(MigrationCleanupWork, "notBeforeDelay", 1)
+ self.patch(HomeCleanupWork, "notBeforeDelay", 1)
+ self.patch(MigratedHomeCleanupWork, "notBeforeDelay", 1)
def configure(self):
@@ -375,6 +381,7 @@
# Data for user01
home = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name="user01", create=True)
+ self.stash["user01_pod0_home_id"] = home.id()
calendar = yield home.childWithName("calendar")
yield calendar.createCalendarObjectWithName("01_1.ics", Component.fromString(self.data01_1))
yield calendar.createCalendarObjectWithName("01_2.ics", Component.fromString(self.data01_2))
@@ -661,14 +668,48 @@
data = yield self.attachmentToString(attachment)
self.assertEqual(data, "Here is some text #1.")
- # No migration data left
+ # Check removal of data from new pod
+
+ # Make sure all jobs are done
+ yield JobItem.waitEmpty(self.theStoreUnderTest(1).newTransaction, reactor, 60)
+
+ # No migration state data left
txn = self.theTransactionUnderTest(1)
- yield JobItem.waitEmpty(self.theStoreUnderTest(1).newTransaction, reactor, 60)
for migrationType in (CalendarMigrationRecord, CalendarObjectMigrationRecord, AttachmentMigrationRecord,):
records = yield migrationType.all(txn)
self.assertEqual(len(records), 0, msg=migrationType.__name__)
+ yield self.commitTransaction(1)
+ # No homes
+ txn = self.theTransactionUnderTest(1)
+ oldhome = yield txn.calendarHomeWithUID("user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(oldhome is None)
+ oldhome = yield txn.notificationsWithUID("user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(oldhome is None)
+ # Check removal of data from old pod
+
+ # Make sure all jobs are done
+ yield JobItem.waitEmpty(self.theStoreUnderTest(0).newTransaction, reactor, 60)
+
+ # No homes
+ txn = self.theTransactionUnderTest(0)
+ oldhome = yield txn.calendarHomeWithUID("user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(oldhome is None)
+ oldhome = yield txn.notificationsWithUID("user01", status=_HOME_STATUS_DISABLED)
+ self.assertTrue(oldhome is None)
+
+ # No delegates
+ for delegateType in (DelegateRecord, DelegateGroupsRecord, ExternalDelegateGroupsRecord):
+ records = yield delegateType.query(txn, delegateType.delegator == "user01")
+ self.assertEqual(len(records), 0, msg=delegateType.__name__)
+
+ # No work items
+ for workType in allScheduleWork:
+ records = yield workType.query(txn, workType.homeResourceID == self.stash["user01_pod0_home_id"])
+ self.assertEqual(len(records), 0, msg=workType.__name__)
+
+
@inlineCallbacks
def test_migration(self):
"""
Added: CalendarServer/trunk/txdav/common/datastore/podding/migration/work.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/podding/migration/work.py (rev 0)
+++ CalendarServer/trunk/txdav/common/datastore/podding/migration/work.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -0,0 +1,101 @@
+##
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.enterprise.dal.record import fromTable
+from twext.enterprise.jobqueue import WorkItem
+
+from twisted.internet.defer import inlineCallbacks
+
+from txdav.caldav.datastore.scheduling.imip.token import iMIPTokenRecord
+from txdav.caldav.datastore.scheduling.work import allScheduleWork
+from txdav.common.datastore.sql_directory import DelegateRecord, \
+ DelegateGroupsRecord, ExternalDelegateGroupsRecord
+from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_DISABLED
+
+
+class HomeCleanupWork(WorkItem, fromTable(schema.HOME_CLEANUP_WORK)):
+ """
+ Work item to clean up any previously "external" homes on the pod to which data was migrated to. Those
+ old homes will now be marked as disabled and need to be silently removed without any side effects
+ (i.e., no implicit scheduling, no sharing cancels, etc).
+ """
+
+ group = "ownerUID"
+
+ notBeforeDelay = 300 # 5 minutes
+
+ @inlineCallbacks
+ def doWork(self):
+ """
+ Delete all the corresponding homes.
+ """
+
+ oldhome = yield self.transaction.calendarHomeWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
+ if oldhome is not None:
+ yield oldhome.purgeAll()
+
+ oldnotifications = yield self.transaction.notificationsWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
+ if oldnotifications is not None:
+ yield oldnotifications.purge()
+
+
+
+class MigratedHomeCleanupWork(WorkItem, fromTable(schema.MIGRATED_HOME_CLEANUP_WORK)):
+ """
+ Work item to clean up the old home data left behind after migration, as well
+ as other unwanted items like iMIP tokens, delegates etc. The old homes will
+ now be marked as disabled and need to be silently removed without any side
+ effects (i.e., no implicit scheduling, no sharing cancels, etc).
+ """
+
+ group = "ownerUID"
+
+ notBeforeDelay = 300 # 5 minutes
+
+ @inlineCallbacks
+ def doWork(self):
+ """
+ Delete all the corresponding homes, then the ancillary data.
+ """
+
+ oldhome = yield self.transaction.calendarHomeWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
+ if oldhome is not None:
+ # Work items - we need to clean these up before the home goes away because we have an "on delete cascade" on the WorkItem
+ # table, and if that ran it would leave orphaned Job rows set to a pause state and those would remain for ever in the table.
+ for workType in allScheduleWork:
+ items = yield workType.query(self.transaction, workType.homeResourceID == oldhome.id())
+ for item in items:
+ yield item.remove()
+
+ yield oldhome.purgeAll()
+
+ oldnotifications = yield self.transaction.notificationsWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
+ if oldnotifications is not None:
+ yield oldnotifications.purge()
+
+ # These are things that reference the home id or the user UID but don't get removed via a cascade
+
+ # iMIP tokens
+ cuaddr = "urn:x-uid:{}".format(self.ownerUID)
+ yield iMIPTokenRecord.deletesome(
+ self.transaction,
+ iMIPTokenRecord.organizer == cuaddr,
+ )
+
+ # Delegators - individual and group
+ yield DelegateRecord.deletesome(self.transaction, DelegateRecord.delegator == self.ownerUID)
+ yield DelegateGroupsRecord.deletesome(self.transaction, DelegateGroupsRecord.delegator == self.ownerUID)
+ yield ExternalDelegateGroupsRecord.deletesome(self.transaction, ExternalDelegateGroupsRecord.delegator == self.ownerUID)
Modified: CalendarServer/trunk/txdav/common/datastore/podding/store_api.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/podding/store_api.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/podding/store_api.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -135,6 +135,39 @@
})
+ @inlineCallbacks
+ def send_migrated_home(self, txn, ownerUID):
+ """
+ Tell another pod that user data migration is done.
+
+ @param txn: transaction to use
+ @type txn: L{CommonStoreTransaction}
+ @param server: server to query
+ @type server: L{Server}
+ @param ownerUID: directory UID of the user whose data has been migrated
+ @type ownerUID: L{str}
+ """
+
+ recipient = yield self.store.directoryService().recordWithUID(ownerUID)
+ request = {
+ "action": "migrated-home",
+ "ownerUID": ownerUID,
+ }
+ yield self.sendRequestToServer(txn, recipient.server(), request)
+
+
+ @inlineCallbacks
+ def recv_migrated_home(self, txn, request):
+ """
+ Process a migrated homes cross-pod request. Request arguments as per L{send_migrated_home}.
+
+ @param request: request arguments
+ @type request: C{dict}
+ """
+
+ yield txn.migratedHome(request["ownerUID"])
+
+
@staticmethod
def _to_serialize_pair_list(value):
"""
Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -58,6 +58,7 @@
from txdav.carddav.iaddressbookstore import IAddressBookTransaction
from txdav.common.datastore.common import HomeChildBase
from txdav.common.datastore.podding.conduit import PoddingConduit
+from txdav.common.datastore.podding.migration.work import MigratedHomeCleanupWork
from txdav.common.datastore.sql_apn import APNSubscriptionsMixin
from txdav.common.datastore.sql_directory import DelegatesAPIMixin, \
GroupsAPIMixin, GroupCacherAPIMixin
@@ -802,6 +803,23 @@
returnValue(result)
+ def migratedHome(self, ownerUID):
+ """
+ This pod is being told that user data migration to another pod has been completed, and the old data now
+ needs to be removed.
+
+ @param ownerUID: directory UID of the user whose data has been migrated
+ @type ownerUID: L{str}
+ """
+
+ # All we do is schedule a work item to run the actual clean-up
+ return MigratedHomeCleanupWork.reschedule(
+ self,
+ MigratedHomeCleanupWork.notBeforeDelay,
+ ownerUID=ownerUID,
+ )
+
+
def preCommit(self, operation):
"""
Run things before C{commit}. (Note: only provided by SQL
@@ -2137,6 +2155,62 @@
self._children.pop(child.id(), None)
+ @inlineCallbacks
+ def purgeAll(self):
+ """
+ Do a complete purge of all data associated with this calendar home. For now this will assume
+ a "silent" non-implicit behavior. In the future we will want to build in some of the options
+ the current set of "purge" CLI tools have to allow for cancels of future events etc.
+ """
+
+ # Removing the home table entry does NOT remove the child class entry - it does remove
+ # the associated bind entry. So manually remove each child.
+ yield self.purgeAllChildren()
+
+ r = self._childClass._revisionsSchema
+ yield Delete(
+ From=r,
+ Where=r.HOME_RESOURCE_ID == self._resourceID,
+ ).on(self._txn)
+
+ h = self._homeSchema
+ yield Delete(
+ From=h,
+ Where=h.RESOURCE_ID == self._resourceID,
+ ).on(self._txn)
+
+ yield self.properties()._removeResource()
+
+ if self._txn._queryCacher:
+ yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithUID(
+ self._homeType,
+ self.uid(),
+ self._status,
+ ))
+ yield self._txn._queryCacher.delete(self._txn._queryCacher.keyForHomeWithID(
+ self._homeType,
+ self.id(),
+ self._status,
+ ))
+
+
+ @inlineCallbacks
+ def purgeAllChildren(self):
+ """
+ Purge each child (non-implicit).
+ """
+
+ children = yield self.loadChildren()
+ for child in children:
+ yield child.unshare()
+ if child.owned():
+ yield child.purge()
+ self._children.pop(child.name(), None)
+ self._children.pop(child.id(), None)
+
+ yield self.removeUnacceptedShares()
+
+
def transaction(self):
return self._txn
@@ -3599,6 +3673,11 @@
@inlineCallbacks
def _reallyRemove(self):
+ """
+ Actually remove this collection from the database. All the child resources will be automatically
+ removed by virtue of an on delete cascade. Note that means no implicit scheduling cancels will
+ occur.
+ """
# Stop sharing first
yield self.ownerDeleteShare()
Modified: CalendarServer/trunk/txdav/common/datastore/sql_notification.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_notification.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql_notification.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -586,8 +586,10 @@
),
).on(self._txn, **kwds)
+ purge = remove
+
class NotificationObjectRecord(SerializableRecord, fromTable(schema.NOTIFICATION)):
"""
@DynamicAttrs
Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current-oracle-dialect.sql 2015-05-13 18:18:41 UTC (rev 14783)
@@ -652,6 +652,18 @@
"HOME_RESOURCE_ID" integer not null references CALENDAR_HOME on delete cascade
);
+create table HOME_CLEANUP_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "OWNER_UID" nvarchar2(255)
+);
+
+create table MIGRATED_HOME_CLEANUP_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "OWNER_UID" nvarchar2(255)
+);
+
create table CALENDARSERVER (
"NAME" nvarchar2(255) primary key,
"VALUE" nvarchar2(255)
@@ -1011,4 +1023,12 @@
"HOME_RESOURCE_ID"
);
+create index HOME_CLEANUP_WORK_JOB_9631dfb0 on HOME_CLEANUP_WORK (
+ "JOB_ID"
+);
+
+create index MIGRATED_HOME_CLEANUP_4c714fd4 on MIGRATED_HOME_CLEANUP_WORK (
+ "JOB_ID"
+);
+
-- Extra schema to add to current-oracle-dialect.sql
Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/current.sql 2015-05-13 18:18:41 UTC (rev 14783)
@@ -1234,6 +1234,32 @@
create index MIGRATION_CLEANUP_WORK_HOME_RESOURCE_ID on
MIGRATION_CLEANUP_WORK(HOME_RESOURCE_ID);
+-----------------------
+-- Home Cleanup Work --
+-----------------------
+
+create table HOME_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ OWNER_UID varchar(255) not null
+);
+
+create index HOME_CLEANUP_WORK_JOB_ID on
+ HOME_CLEANUP_WORK(JOB_ID);
+
+--------------------------------
+-- Migrated Home Cleanup Work --
+--------------------------------
+
+create table MIGRATED_HOME_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ OWNER_UID varchar(255) not null
+);
+
+create index MIGRATED_HOME_CLEANUP_WORK_JOB_ID on
+ MIGRATED_HOME_CLEANUP_WORK(JOB_ID);
+
--------------------
-- Schema Version --
--------------------
Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_54_to_55.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_54_to_55.sql 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_54_to_55.sql 2015-05-13 18:18:41 UTC (rev 14783)
@@ -22,7 +22,7 @@
alter table JOB
add ("PAUSE" integer default 0);
--- New table
+-- New tables
create table MIGRATION_CLEANUP_WORK (
"WORK_ID" integer primary key,
"JOB_ID" integer not null references JOB,
@@ -36,5 +36,25 @@
"HOME_RESOURCE_ID"
);
+create table HOME_CLEANUP_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "OWNER_UID" nvarchar2(255)
+);
+
+create index HOME_CLEANUP_WORK_JOB_9631dfb0 on HOME_CLEANUP_WORK (
+ "JOB_ID"
+);
+
+create table MIGRATED_HOME_CLEANUP_WORK (
+ "WORK_ID" integer primary key,
+ "JOB_ID" integer not null references JOB,
+ "OWNER_UID" nvarchar2(255)
+);
+
+create index MIGRATED_HOME_CLEANUP_4c714fd4 on MIGRATED_HOME_CLEANUP_WORK (
+ "JOB_ID"
+);
+
-- update the version
update CALENDARSERVER set VALUE = '55' where NAME = 'VERSION';
Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_54_to_55.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_54_to_55.sql 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_54_to_55.sql 2015-05-13 18:18:41 UTC (rev 14783)
@@ -22,7 +22,7 @@
alter table JOB
add column PAUSE integer default 0;
--- New Table
+-- New tables
create table MIGRATION_CLEANUP_WORK (
WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
JOB_ID integer references JOB not null,
@@ -34,5 +34,23 @@
create index MIGRATION_CLEANUP_WORK_HOME_RESOURCE_ID on
MIGRATION_CLEANUP_WORK(HOME_RESOURCE_ID);
+create table HOME_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ OWNER_UID varchar(255) not null
+);
+
+create index HOME_CLEANUP_WORK_JOB_ID on
+ HOME_CLEANUP_WORK(JOB_ID);
+
+create table MIGRATED_HOME_CLEANUP_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ'), -- implicit index
+ JOB_ID integer references JOB not null,
+ OWNER_UID varchar(255) not null
+);
+
+create index MIGRATED_HOME_CLEANUP_WORK_JOB_ID on
+ MIGRATED_HOME_CLEANUP_WORK(JOB_ID);
+
-- update the version
update CALENDARSERVER set VALUE = '55' where NAME = 'VERSION';
Modified: CalendarServer/trunk/txdav/common/datastore/sql_sharing.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_sharing.py 2015-05-13 18:09:46 UTC (rev 14782)
+++ CalendarServer/trunk/txdav/common/datastore/sql_sharing.py 2015-05-13 18:18:41 UTC (rev 14783)
@@ -29,7 +29,7 @@
from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, _BIND_MODE_DIRECT, \
_BIND_MODE_INDIRECT, _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, \
_BIND_STATUS_INVITED, _BIND_STATUS_INVALID, _BIND_STATUS_DELETED, \
- _HOME_STATUS_EXTERNAL
+ _HOME_STATUS_EXTERNAL, _HOME_STATUS_DISABLED
from txdav.common.icommondatastore import ExternalShareFailed, \
HomeChildNameAlreadyExistsError, AllRetriesFailed
from txdav.xml import element
@@ -479,8 +479,9 @@
yield self._sendExternalUninvite(shareeView)
else:
# If current user state is accepted then we send an invite with the new state, otherwise
- # we cancel any existing invites for the user
- if not shareeView.direct():
+ # we cancel any existing invites for the user. Also, if the ownerHome is disabled, we assume
+ # that no sharing invites are sent.
+ if not shareeView.direct() and shareeView.ownerHome().status() != _HOME_STATUS_DISABLED:
if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
yield self._removeInviteNotification(shareeView)
else:
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20150513/99de853a/attachment-0001.html>
More information about the calendarserver-changes
mailing list