[CalendarServer-changes] [12091] CalendarServer/trunk/twistedcaldav
source_changes at macosforge.org
source_changes at macosforge.org
Wed Mar 12 11:17:22 PDT 2014
Revision: 12091
http://trac.calendarserver.org/changeset/12091
Author: wsanchez at apple.com
Date: 2013-12-13 16:19:41 -0800 (Fri, 13 Dec 2013)
Log Message:
-----------
Use twisted.protocols.memcache instead of twext.protocols.memcache.
Modified Paths:
--------------
CalendarServer/trunk/twistedcaldav/memcachepool.py
CalendarServer/trunk/twistedcaldav/test/test_memcachelock.py
Modified: CalendarServer/trunk/twistedcaldav/memcachepool.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/memcachepool.py 2013-12-14 00:18:20 UTC (rev 12090)
+++ CalendarServer/trunk/twistedcaldav/memcachepool.py 2013-12-14 00:19:41 UTC (rev 12091)
@@ -18,13 +18,14 @@
from twisted.internet.defer import Deferred, fail
from twisted.internet.protocol import ReconnectingClientFactory
+from twisted.protocols.memcache import MemCacheProtocol, NoSuchCommand
+from twext.python.log import Logger
from twext.internet.gaiendpoint import GAIEndpoint
from twext.internet.adaptendpoint import connect
-from twext.python.log import Logger
-from twext.protocols.memcache import MemCacheProtocol, NoSuchCommand
+
class PooledMemCacheProtocol(MemCacheProtocol):
"""
A MemCacheProtocol that will notify a connectionPool that it is ready
@@ -161,7 +162,9 @@
self.shutdown_deferred = None
self.shutdown_requested = False
- reactor.addSystemEventTrigger('before', 'shutdown', self._shutdownCallback)
+ reactor.addSystemEventTrigger(
+ 'before', 'shutdown', self._shutdownCallback
+ )
self._busyClients = set([])
self._freeClients = set([])
@@ -191,8 +194,9 @@
@return: A L{Deferred} that fires with the L{IProtocol} instance.
"""
- self.log.debug("Initiating new client connection to: %r" % (
- self._endpoint,))
+ self.log.debug(
+ "Initiating new client connection to: %r" % (self._endpoint,)
+ )
self._logClientStats()
self._pendingConnects += 1
@@ -239,9 +243,12 @@
Upon memcache error, log the failed request along with the error
message and free the client.
"""
- self.log.error("Memcache error: %s; request: %s %s" %
- (failure.value, command,
- " ".join(args)[:self.REQUEST_LOGGING_SIZE],))
+ self.log.error(
+ "Memcache error: %s; request: %s %s" % (
+ failure.value, command,
+ " ".join(args)[:self.REQUEST_LOGGING_SIZE],
+ )
+ )
self.clientFree(client)
self.clientBusy(client)
@@ -276,11 +283,14 @@
d = self._performRequestOnClient(
client, command, *args, **kwargs)
- elif len(self._busyClients) + self._pendingConnects >= self._maxClients:
+ elif (
+ len(self._busyClients) + self._pendingConnects >= self._maxClients
+ ):
d = Deferred()
self._commands.append((d, command, args, kwargs))
- self.log.debug("Command queued: %s, %r, %r" % (
- command, args, kwargs))
+ self.log.debug(
+ "Command queued: %s, %r, %r" % (command, args, kwargs)
+ )
self._logClientStats()
else:
@@ -292,12 +302,14 @@
def _logClientStats(self):
- self.log.debug("Clients #free: %d, #busy: %d, "
- "#pending: %d, #queued: %d" % (
+ self.log.debug(
+ "Clients #free: %d, #busy: %d, #pending: %d, #queued: %d" % (
len(self._freeClients),
len(self._busyClients),
self._pendingConnects,
- len(self._commands)))
+ len(self._commands),
+ )
+ )
def clientGone(self, client):
@@ -349,8 +361,10 @@
if len(self._commands) > 0:
d, command, args, kwargs = self._commands.pop(0)
- self.log.debug("Performing Queued Command: %s, %r, %r" % (
- command, args, kwargs))
+ self.log.debug(
+ "Performing Queued Command: %s, %r, %r"
+ % (command, args, kwargs)
+ )
self._logClientStats()
_ign_d = self.performRequest(
@@ -425,6 +439,8 @@
_memCachePools = {} # Maps a name to a pool object
_memCachePoolHandler = {} # Maps a handler id to a named pool
+
+
def installPools(pools, maxClients=5, reactor=None):
if reactor is None:
from twisted.internet import reactor
@@ -440,7 +456,9 @@
-def _installPool(name, handleTypes, serverEndpoint, maxClients=5, reactor=None):
+def _installPool(
+ name, handleTypes, serverEndpoint, maxClients=5, reactor=None
+):
pool = MemCachePool(serverEndpoint, maxClients=maxClients, reactor=None)
_memCachePools[name] = pool
Modified: CalendarServer/trunk/twistedcaldav/test/test_memcachelock.py
===================================================================
--- CalendarServer/trunk/twistedcaldav/test/test_memcachelock.py 2013-12-14 00:18:20 UTC (rev 12090)
+++ CalendarServer/trunk/twistedcaldav/test/test_memcachelock.py 2013-12-14 00:19:41 UTC (rev 12091)
@@ -5,16 +5,17 @@
Test the memcache client protocol.
"""
-from twext.protocols.memcache import MemCacheProtocol
-
from twisted.test.proto_helpers import StringTransportWithDisconnection
from twisted.internet.task import Clock
from twisted.internet.defer import inlineCallbacks
+from twisted.protocols.memcache import MemCacheProtocol
from twistedcaldav.memcachelock import MemcacheLock, MemcacheLockTimeoutError
from twistedcaldav.test.util import TestCase
+
+
class MemCacheTestCase(TestCase):
"""
Test client protocol class L{MemCacheProtocol}.
@@ -22,22 +23,32 @@
class FakedMemcacheLock(MemcacheLock):
- def __init__(self, faked, namespace, locktoken, timeout=5.0, retry_interval=0.1, expire_time=0):
+ def __init__(
+ self, faked, namespace, locktoken,
+ timeout=5.0, retry_interval=0.1, expire_time=0
+ ):
"""
-
@param namespace: a unique namespace for this lock's tokens
@type namespace: C{str}
+
@param locktoken: the name of the locktoken
@type locktoken: C{str}
- @param timeout: the maximum time in seconds that the lock should block
+
+ @param timeout: the maximum time in seconds that the lock should
+ block
@type timeout: C{float}
+
@param retry_interval: the interval to retry acquiring the lock
@type retry_interval: C{float}
- @param expiryTime: the time in seconds for the lock to expire. Zero: no expiration.
+
+ @param expiryTime: the time in seconds for the lock to expire.
+ Zero: no expiration.
@type expiryTime: C{float}
"""
- super(MemCacheTestCase.FakedMemcacheLock, self).__init__(namespace, locktoken, timeout, retry_interval, expire_time)
+ super(MemCacheTestCase.FakedMemcacheLock, self).__init__(
+ namespace, locktoken, timeout, retry_interval, expire_time
+ )
self.faked = faked
def _getMemcacheProtocol(self):
@@ -90,11 +101,16 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking")
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking"
+ )
return self._test(
lock.get("foo"),
"get lock:foo-acbd18db4cc2f85cedef654fccc4a4d8\r\n",
- "VALUE lock:foo-acbd18db4cc2f85cedef654fccc4a4d8 0 3\r\nbar\r\nEND\r\n",
+ (
+ "VALUE lock:foo-acbd18db4cc2f85cedef654fccc4a4d8 0 3\r\n"
+ "bar\r\nEND\r\n"
+ ),
"bar"
)
@@ -105,7 +121,9 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking")
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking"
+ )
return self._test(
lock.set("foo", "bar"),
"set lock:foo-acbd18db4cc2f85cedef654fccc4a4d8 0 0 3\r\nbar\r\n",
@@ -121,7 +139,9 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking")
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking"
+ )
yield self._test(
lock.acquire(),
"add lock:locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n1\r\n",
@@ -138,7 +158,9 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking", timeout=0)
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking", timeout=0
+ )
yield self._test(
lock.acquire(),
"add lock:locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n1\r\n",
@@ -155,11 +177,17 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking", timeout=0)
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking", timeout=0
+ )
try:
yield self._test(
lock.acquire(),
- "add lock:locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n1\r\n",
+ (
+ "add lock:"
+ "locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n"
+ "1\r\n"
+ ),
"NOT_STORED\r\n",
True
)
@@ -179,7 +207,9 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking")
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking"
+ )
yield self._test(
lock.acquire(),
"add lock:locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n1\r\n",
@@ -203,7 +233,9 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", "locking")
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", "locking"
+ )
yield self._test(
lock.acquire(),
"add lock:locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n1\r\n",
@@ -225,7 +257,9 @@
called back with the value and the flag associated with the given key
if the server returns a successful result.
"""
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", u"locking")
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", u"locking"
+ )
yield self._test(
lock.acquire(),
"add lock:locking-559159aa00cc525bfe5c4b34cf16cccb 0 0 1\r\n1\r\n",
@@ -253,7 +287,9 @@
self.fail("AssertionError not raised")
try:
- lock = MemCacheTestCase.FakedMemcacheLock(self.proto, "lock", ("abc",))
+ lock = MemCacheTestCase.FakedMemcacheLock(
+ self.proto, "lock", ("abc",)
+ )
yield lock.acquire()
self.fail("AssertionError not raised")
except AssertionError:
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20140312/53190650/attachment.html>
More information about the calendarserver-changes
mailing list