[CalendarServer-changes] [13515] twext/trunk/twext/enterprise

source_changes at macosforge.org
Tue May 20 11:38:40 PDT 2014


Revision: 13515
          http://trac.calendarserver.org//changeset/13515
Author:   cdaboo at apple.com
Date:     2014-05-20 11:38:40 -0700 (Tue, 20 May 2014)
Log Message:
-----------
Add singleton and regenerating WorkItem types.
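
A rough sketch of how the two new types introduced below might be used. Only the
hook names (doWork, regenerateInterval) and the singleton/regenerating semantics
come from this change; the schema table, class name and interval are assumptions,
mirroring how the tests build a table mixin with fromTable.

    from twisted.internet.defer import succeed

    from twext.enterprise.dal.record import fromTable
    from twext.enterprise.jobqueue import RegeneratingWorkItem

    # `schema` stands for the application's DAL schema and CACHE_CLEANUP_WORK
    # for a table in it; both are assumptions for this sketch.
    CacheCleanupWorkTable = fromTable(schema.CACHE_CLEANUP_WORK)


    class CacheCleanupWork(RegeneratingWorkItem, CacheCleanupWorkTable):
        """
        Hypothetical periodic work: only one may be queued at a time, and a
        new instance is enqueued automatically after each run.
        """

        def regenerateInterval(self):
            # Seconds until afterWork() enqueues the next instance.
            return 60 * 60

        def doWork(self):
            # The actual periodic work; must return a Deferred.
            return succeed(None)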

Modified Paths:
--------------
    twext/trunk/twext/enterprise/dal/record.py
    twext/trunk/twext/enterprise/dal/test/test_record.py
    twext/trunk/twext/enterprise/jobqueue.py
    twext/trunk/twext/enterprise/test/test_jobqueue.py

Modified: twext/trunk/twext/enterprise/dal/record.py
===================================================================
--- twext/trunk/twext/enterprise/dal/record.py	2014-05-20 13:53:21 UTC (rev 13514)
+++ twext/trunk/twext/enterprise/dal/record.py	2014-05-20 18:38:40 UTC (rev 13515)
@@ -417,6 +417,17 @@
 
 
     @classmethod
+    def deleteall(cls, transaction):
+        """
+        Delete all rows from the table that corresponds to C{cls}.
+        """
+        return Delete(
+            From=cls.table,
+            Where=None,
+        ).on(transaction)
+
+
+    @classmethod
     @inlineCallbacks
     def _rowsFromQuery(cls, transaction, qry, rozrc):
         """

Modified: twext/trunk/twext/enterprise/dal/test/test_record.py
===================================================================
--- twext/trunk/twext/enterprise/dal/test/test_record.py	2014-05-20 13:53:21 UTC (rev 13514)
+++ twext/trunk/twext/enterprise/dal/test/test_record.py	2014-05-20 18:38:40 UTC (rev 13515)
@@ -319,6 +319,23 @@
 
 
     @inlineCallbacks
+    def test_deleteall(self):
+        """
+        L{Record.deleteall} will delete all instances of the record.
+        """
+        txn = self.pool.connection()
+        data = [(123, u"one"), (456, u"four"), (345, u"three"),
+                (234, u"two"), (356, u"three")]
+        for beta, gamma in data:
+            yield txn.execSQL("insert into ALPHA values (:1, :2)",
+                              [beta, gamma])
+
+        yield TestRecord.deleteall(txn)
+        all = yield TestRecord.all(txn)
+        self.assertEqual(len(all), 0)
+
+
+    @inlineCallbacks
     def test_repr(self):
         """
         The C{repr} of a L{Record} presents all its values.

Modified: twext/trunk/twext/enterprise/jobqueue.py
===================================================================
--- twext/trunk/twext/enterprise/jobqueue.py	2014-05-20 13:53:21 UTC (rev 13514)
+++ twext/trunk/twext/enterprise/jobqueue.py	2014-05-20 18:38:40 UTC (rev 13515)
@@ -513,33 +513,18 @@
         # date.
         workItem = yield self.workItem()
         if workItem is not None:
-            if workItem.group is not None:
-                try:
-                    yield NamedLock.acquire(self.transaction, workItem.group)
-                except Exception as e:
-                    log.error("JobItem: {jobid}, WorkItem: {workid} lock failed: {exc}".format(
-                        jobid=self.jobID,
-                        workid=workItem.workID,
-                        exc=e,
-                    ))
-                    raise JobFailedError(e)
-
             try:
-                # Once the work is done we delete ourselves
-                yield workItem.delete()
-            except NoSuchRecord:
-                # The record has already been removed
-                pass
-            else:
-                try:
+                okToGo = yield workItem.beforeWork()
+                if okToGo:
                     yield workItem.doWork()
-                except Exception as e:
-                    log.error("JobItem: {jobid}, WorkItem: {workid} failed: {exc}".format(
-                        jobid=self.jobID,
-                        workid=workItem.workID,
-                        exc=e,
-                    ))
-                    raise JobFailedError(e)
+                    yield workItem.afterWork()
+            except Exception as e:
+                log.error("JobItem: {jobid}, WorkItem: {workid} failed: {exc}".format(
+                    jobid=self.jobID,
+                    workid=workItem.workID,
+                    exc=e,
+                ))
+                raise JobFailedError(e)
 
         try:
             # Once the work is done we delete ourselves
@@ -826,6 +811,40 @@
         returnValue(workItems)
 
 
+    @inlineCallbacks
+    def beforeWork(self):
+        """
+        A hook that gets called before the L{WorkItem} does its real work. This can be used
+        for common behaviors needed by work items. The base implementation handles the group
+        locking behavior.
+
+        @return: a L{Deferred} that fires with L{True} if processing of the L{WorkItem}
+            should continue, L{False} if it should be skipped without error.
+        @rtype: L{Deferred}
+        """
+        if self.group is not None:
+            try:
+                yield NamedLock.acquire(self.transaction, self.group)
+            except Exception as e:
+                log.error("JobItem: {jobid}, WorkItem: {workid} lock failed: {exc}".format(
+                    jobid=self.jobID,
+                    workid=self.workID,
+                    exc=e,
+                ))
+                raise JobFailedError(e)
+
+        try:
+            # Work item is deleted before doing work - but someone else may have
+            # done it whilst we waited on the lock so handle that by simply
+            # ignoring the work
+            yield self.delete()
+        except NoSuchRecord:
+            # The record has already been removed
+            returnValue(False)
+        else:
+            returnValue(True)
+
+
     def doWork(self):
         """
         Subclasses must implement this to actually perform the queued work.
@@ -838,7 +857,123 @@
         raise NotImplementedError
 
 
+    def afterWork(self):
+        """
+        A hook that gets called after the L{WorkItem} does its real work. This can be used
+        for common clean-up behaviors. The base implementation does nothing.
+        """
+        return succeed(None)
 
+
+    @classmethod
+    @inlineCallbacks
+    def reschedule(cls, transaction, seconds, **kwargs):
+        """
+        Reschedule this work.
+
+        @param seconds: seconds delay for the new work; if C{None} or negative, nothing is scheduled.
+        @type seconds: L{int} or L{None}
+        """
+        if seconds is not None and seconds >= 0:
+            notBefore = (
+                datetime.utcnow() +
+                timedelta(seconds=seconds)
+            )
+            log.info(
+                "Scheduling next {cls}: {when}",
+                cls=cls.__name__,
+                when=notBefore,
+            )
+            wp = yield transaction._queuer.enqueueWork(
+                transaction,
+                cls,
+                notBefore=notBefore,
+                **kwargs
+            )
+            returnValue(wp)
+        else:
+            returnValue(None)
+
+
+
+class SingletonWorkItem(WorkItem):
+    """
+    An L{WorkItem} that can only appear once no matter how many times an attempt is
+    made to create one. The L{allowOverride} class property determines whether the attempt
+    to create a new job is simply ignored, or whether the new job overrides any existing
+    one.
+    """
+
+    @classmethod
+    @inlineCallbacks
+    def makeJob(cls, transaction, **kwargs):
+        """
+        A new work item needs to be created. First we create a Job record, then
+        we create the actual work item related to the job.
+
+        @param transaction: the transaction to use
+        @type transaction: L{IAsyncTransaction}
+        """
+
+        all = yield cls.all(transaction)
+        if len(all):
+            # Silently ignore the creation of this work
+            returnValue(None)
+
+        result = yield super(SingletonWorkItem, cls).makeJob(transaction, **kwargs)
+        returnValue(result)
+
+
+    @inlineCallbacks
+    def beforeWork(self):
+        """
+        No need to lock - for safety just delete any others.
+        """
+
+        # Delete all other work items
+        yield self.deleteall(self.transaction)
+        returnValue(True)
+
+
+    @classmethod
+    @inlineCallbacks
+    def reschedule(cls, transaction, seconds, force=False, **kwargs):
+        """
+        Reschedule a singleton. If L{force} is set, delete any existing items before
+        creating the new one. This allows the caller to explicitly override an existing
+        singleton.
+        """
+        if force:
+            yield cls.deleteall(transaction)
+            all = yield cls.all(transaction)
+        result = yield super(SingletonWorkItem, cls).reschedule(transaction, seconds, **kwargs)
+        returnValue(result)
+
+
+
+class RegeneratingWorkItem(SingletonWorkItem):
+    """
+    A L{SingletonWorkItem} that regenerates itself when its work is done.
+    """
+
+    def regenerateInterval(self):
+        """
+        Return the interval in seconds between regenerating instances.
+        """
+        return None
+
+
+    @inlineCallbacks
+    def afterWork(self):
+        """
+        A hook that gets called after the L{WorkItem} does its real work. This
+        implementation reschedules a new instance after L{regenerateInterval} seconds.
+        """
+        yield super(RegeneratingWorkItem, self).afterWork()
+        yield self.reschedule(self.transaction, self.regenerateInterval())
+
+
+
 class PerformJob(Command):
     """
     Notify another process that it must do a job that has been persisted to
@@ -1217,13 +1352,20 @@
     def __init__(self, transactionFactory, whenConnected,
                  boxReceiver=None, locator=None):
         super(ConnectionFromController, self).__init__(boxReceiver, locator)
-        self.transactionFactory = transactionFactory
+        self._txnFactory = transactionFactory
         self.whenConnected = whenConnected
         # FIXME: Glyph it appears WorkProposal expects this to have reactor...
         from twisted.internet import reactor
         self.reactor = reactor
 
 
+    @property
+    def transactionFactory(self):
+        txn = self._txnFactory
+        txn._queuer = self
+        return txn
+
+
     def startReceivingBoxes(self, sender):
         super(ConnectionFromController, self).startReceivingBoxes(sender)
         self.whenConnected(self)
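
To illustrate the hook flow that JobItem.run now drives (beforeWork, then doWork,
then afterWork), here is a sketch of a WorkItem subclass overriding the hooks.
The table, class name and method bodies are invented; calling super() keeps the
base class's group-lock-and-delete behaviour shown above.

    from twisted.internet.defer import inlineCallbacks, returnValue, succeed

    from twext.enterprise.dal.record import fromTable
    from twext.enterprise.jobqueue import WorkItem

    # PUSH_CLEANUP_WORK is an assumed table in the application's DAL schema.
    PushCleanupWorkTable = fromTable(schema.PUSH_CLEANUP_WORK)


    class PushCleanupWork(WorkItem, PushCleanupWorkTable):

        @inlineCallbacks
        def beforeWork(self):
            # The base implementation takes the group lock (if any) and deletes
            # this row; it returns False when another transaction already
            # consumed the work, in which case doWork() is skipped.
            okToGo = yield super(PushCleanupWork, self).beforeWork()
            if not okToGo:
                returnValue(False)
            # Extra per-item gating could go here.
            returnValue(True)

        def doWork(self):
            # The real work; must return a Deferred.
            return succeed(None)

        def afterWork(self):
            # Post-work clean-up; the base implementation is a no-op.
            return super(PushCleanupWork, self).afterWork()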

Modified: twext/trunk/twext/enterprise/test/test_jobqueue.py
===================================================================
--- twext/trunk/twext/enterprise/test/test_jobqueue.py	2014-05-20 13:53:21 UTC (rev 13514)
+++ twext/trunk/twext/enterprise/test/test_jobqueue.py	2014-05-20 18:38:40 UTC (rev 13515)
@@ -42,7 +42,7 @@
     ConnectionFromPeerNode, LocalQueuer,
     _BaseQueuer, NonPerformingQueuer, JobItem,
     WORK_PRIORITY_LOW, WORK_PRIORITY_HIGH, WORK_PRIORITY_MEDIUM,
-    JobDescriptor
+    JobDescriptor, SingletonWorkItem
 )
 import twext.enterprise.jobqueue
 
@@ -229,15 +229,15 @@
         for table in ("DUMMY_WORK_ITEM",)
     ] + ["delete from job"]
 except SkipTest as e:
-    DummyWorkItem = object
+    DummyWorkItemTable = object
     skip = e
 else:
-    DummyWorkItem = fromTable(schema.DUMMY_WORK_ITEM)
+    DummyWorkItemTable = fromTable(schema.DUMMY_WORK_ITEM)
     skip = False
 
 
 
-class DummyWorkItem(WorkItem, DummyWorkItem):
+class DummyWorkItem(WorkItem, DummyWorkItemTable):
     """
     Sample L{WorkItem} subclass that adds two integers together and stores them
     in another table.
@@ -261,7 +261,7 @@
         a concurrent transaction, then commit it.
         """
         workItems = yield super(DummyWorkItem, cls).loadForJob(txn, *a)
-        if workItems[0].deleteOnLoad:
+        if len(workItems) and workItems[0].deleteOnLoad:
             otherTransaction = txn.store().newTransaction()
             otherSelf = yield super(DummyWorkItem, cls).loadForJob(txn, *a)
             yield otherSelf[0].delete()
@@ -270,6 +270,22 @@
 
 
 
+class DummyWorkSingletonItem(SingletonWorkItem, DummyWorkItemTable):
+    """
+    Sample L{SingletonWorkItem} subclass that adds two integers together and stores them
+    in another table.
+    """
+
+    results = {}
+
+    def doWork(self):
+        if self.a == -1:
+            raise ValueError("Ooops")
+        self.results[self.jobID] = self.a + self.b
+        return succeed(None)
+
+
+
 class AMPTests(TestCase):
     """
     Tests for L{AMP} faithfully relaying ids across the wire.
@@ -318,7 +334,7 @@
 
 
     @inlineCallbacks
-    def _enqueue(self, dbpool, a, b, notBefore=None, priority=None, weight=None):
+    def _enqueue(self, dbpool, a, b, notBefore=None, priority=None, weight=None, cl=DummyWorkItem):
         fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)
         if notBefore is None:
             notBefore = datetime.datetime(2012, 12, 13, 12, 12, 0)
@@ -339,7 +355,7 @@
         @transactionally(dbpool.connection)
         def check(txn):
             return qpool.enqueueWork(
-                txn, DummyWorkItem,
+                txn, cl,
                 a=a, b=b, priority=priority, weight=weight,
                 notBefore=notBefore
             )
@@ -347,7 +363,9 @@
         proposal = yield check
         yield proposal.whenProposed()
 
+        returnValue(qpool)
 
+
     @inlineCallbacks
     def test_enqueue(self):
         """
@@ -466,7 +484,84 @@
         self.assertTrue(work.a == 2)
 
 
+    @inlineCallbacks
+    def test_notsingleton(self):
+        """
+        Enqueuing a non-singleton L{WorkItem} twice creates two jobs.
+        """
+        dbpool = buildConnectionPool(self, nodeSchema + jobSchema + schemaText)
 
+        yield self._enqueue(dbpool, 1, 2, cl=DummyWorkItem)
+
+        def allJobs(txn):
+            return DummyWorkItem.all(txn)
+
+        jobs = yield inTransaction(dbpool.connection, allJobs)
+        self.assertTrue(len(jobs) == 1)
+
+        yield self._enqueue(dbpool, 2, 3)
+
+        jobs = yield inTransaction(dbpool.connection, allJobs)
+        self.assertTrue(len(jobs) == 2)
+
+
+    @inlineCallbacks
+    def test_singleton(self):
+        """
+        Enqueuing a L{SingletonWorkItem} twice results in only one job.
+        """
+        dbpool = buildConnectionPool(self, nodeSchema + jobSchema + schemaText)
+
+        yield self._enqueue(dbpool, 1, 2, cl=DummyWorkSingletonItem)
+
+        def allJobs(txn):
+            return DummyWorkSingletonItem.all(txn)
+
+        jobs = yield inTransaction(dbpool.connection, allJobs)
+        self.assertTrue(len(jobs) == 1)
+
+        yield self._enqueue(dbpool, 2, 3, cl=DummyWorkSingletonItem)
+
+        jobs = yield inTransaction(dbpool.connection, allJobs)
+        self.assertTrue(len(jobs) == 1)
+
+
+    @inlineCallbacks
+    def test_singleton_reschedule(self):
+        """
+        L{PeerConnectionPool.enqueueWork} will insert a job and a work item.
+        """
+        dbpool = buildConnectionPool(self, nodeSchema + jobSchema + schemaText)
+
+        qpool = yield self._enqueue(dbpool, 1, 2, cl=DummyWorkSingletonItem, notBefore=datetime.datetime(2014, 5, 17, 12, 0, 0))
+
+        @inlineCallbacks
+        def allWork(txn):
+            jobs = yield JobItem.all(txn)
+            work = [((yield job.workItem()), job) for job in jobs]
+            returnValue(filter(lambda x: x[0], work))
+
+        work = yield inTransaction(dbpool.connection, allWork)
+        self.assertTrue(len(work) == 1)
+        self.assertTrue(work[0][1].notBefore == datetime.datetime(2014, 5, 17, 12, 0, 0))
+
+        def _reschedule_force(txn, force):
+            txn._queuer = qpool
+            return DummyWorkSingletonItem.reschedule(txn, 60, force=force)
+        yield inTransaction(dbpool.connection, _reschedule_force, force=False)
+
+        work = yield inTransaction(dbpool.connection, allWork)
+        self.assertTrue(len(work) == 1)
+        self.assertTrue(work[0][1].notBefore == datetime.datetime(2014, 5, 17, 12, 0, 0))
+
+        yield inTransaction(dbpool.connection, _reschedule_force, force=True)
+
+        work = yield inTransaction(dbpool.connection, allWork)
+        self.assertTrue(len(work) == 1)
+        self.assertTrue(work[0][1].notBefore != datetime.datetime(2014, 5, 17, 12, 0, 0))
+
+
+
 class WorkerConnectionPoolTests(TestCase):
     """
     A L{WorkerConnectionPool} is responsible for managing, in a node's
@@ -627,7 +722,7 @@
             return dummy
 
         peer.choosePerformer = chooseDummy
-        performed = local.performJob(JobDescriptor(7384, 1))
+        performed = local.performJob(JobDescriptor(7384, 1, "ABC"))
         performResult = []
         performed.addCallback(performResult.append)
 
@@ -802,12 +897,12 @@
         worker2, _ignore_trans2 = peer()
 
         # Ask the worker to do something.
-        worker1.performJob(JobDescriptor(1, 1))
+        worker1.performJob(JobDescriptor(1, 1, "ABC"))
         self.assertEquals(worker1.currentLoad, 1)
         self.assertEquals(worker2.currentLoad, 0)
 
         # Now ask the pool to do something
-        peerPool.workerPool.performJob(JobDescriptor(2, 1))
+        peerPool.workerPool.performJob(JobDescriptor(2, 1, "ABC"))
         self.assertEquals(worker1.currentLoad, 1)
         self.assertEquals(worker2.currentLoad, 1)
 