[CalendarServer-changes] [11222] CalendarServer/trunk/txdav
source_changes at macosforge.org
Mon May 20 12:45:21 PDT 2013
Revision: 11222
http://trac.calendarserver.org//changeset/11222
Author: cdaboo at apple.com
Date: 2013-05-20 12:45:21 -0700 (Mon, 20 May 2013)
Log Message:
-----------
Whitespace.
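(As the diff below shows, this is a whitespace-only cleanup in the PEP 8 direction: two blank lines between top-level classes and functions, a space after each comma and after the colon in a lambda, no spaces around "=" in keyword arguments, and no stray blank lines at end of file. A minimal, hypothetical sketch of those conventions follows; the names are illustrative only and not taken from txdav.)

import datetime
import time


NAMESPACE_COMPRESS = {
    "urn:ietf:params:xml:ns:caldav": "CALDAV:",  # one space after ":" in dict literals
    "http://calendarserver.org/ns/": "CS:",
}


def sort_by_flag(results):
    # One space after the colon in a lambda: "lambda x: x[1]", not "lambda x:x[1]".
    return sorted(results, key=lambda x: x[1])


def timestamp_from_struct(tm):
    # No spaces around "=" in keyword arguments: "second=tm.tm_sec", not "second = tm.tm_sec".
    return datetime.datetime(
        year=tm.tm_year, month=tm.tm_mon, day=tm.tm_mday,
        hour=tm.tm_hour, minute=tm.tm_min, second=tm.tm_sec,
    )


# Two blank lines separate each top-level definition above; the module ends
# without trailing blank lines.
if __name__ == "__main__":
    print(sort_by_flag([("b", 2), ("a", 1)]))
    print(timestamp_from_struct(time.localtime()))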
Modified Paths:
--------------
CalendarServer/trunk/txdav/__init__.py
CalendarServer/trunk/txdav/base/datastore/dbapiclient.py
CalendarServer/trunk/txdav/base/datastore/file.py
CalendarServer/trunk/txdav/base/datastore/test/__init__.py
CalendarServer/trunk/txdav/base/datastore/test/test_subpostgres.py
CalendarServer/trunk/txdav/base/datastore/util.py
CalendarServer/trunk/txdav/base/propertystore/none.py
CalendarServer/trunk/txdav/base/propertystore/test/base.py
CalendarServer/trunk/txdav/base/propertystore/test/test_appledouble.py
CalendarServer/trunk/txdav/base/propertystore/test/test_base.py
CalendarServer/trunk/txdav/base/propertystore/test/test_none.py
CalendarServer/trunk/txdav/base/propertystore/test/test_xattr.py
CalendarServer/trunk/txdav/base/propertystore/xattr.py
CalendarServer/trunk/txdav/carddav/datastore/index_file.py
CalendarServer/trunk/txdav/carddav/datastore/test/test_index_file.py
CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py
CalendarServer/trunk/txdav/carddav/datastore/util.py
CalendarServer/trunk/txdav/common/datastore/sql.py
CalendarServer/trunk/txdav/common/datastore/sql_tables.py
CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py
CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py
CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py
Modified: CalendarServer/trunk/txdav/__init__.py
===================================================================
--- CalendarServer/trunk/txdav/__init__.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/__init__.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -22,4 +22,3 @@
# Make sure we have twext's required Twisted patches loaded before we do
# anything at all.
__import__("twext")
-
Modified: CalendarServer/trunk/txdav/base/datastore/dbapiclient.py
===================================================================
--- CalendarServer/trunk/txdav/base/datastore/dbapiclient.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/datastore/dbapiclient.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -45,6 +45,7 @@
cx_Oracle = None
+
class DiagnosticCursorWrapper(object):
"""
Diagnostic wrapper around a DB-API 2.0 cursor for debugging connection
@@ -90,6 +91,7 @@
return results
+
class OracleCursorWrapper(DiagnosticCursorWrapper):
"""
Wrapper for cx_Oracle DB-API connections which implements fetchall() to read
@@ -241,6 +243,7 @@
c.close()
+
def postgresPreflight(connection):
"""
Pre-flight function for PostgreSQL connections: enable standard conforming
Modified: CalendarServer/trunk/txdav/base/datastore/file.py
===================================================================
--- CalendarServer/trunk/txdav/base/datastore/file.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/datastore/file.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -43,10 +43,12 @@
return not name.startswith(".")
+
def hidden(path):
return path.sibling('.' + path.basename())
+
def writeOperation(thunk):
# FIXME: tests
def inner(self, *a, **kw):
@@ -78,9 +80,11 @@
# FIXME: Add DataStoreNotFoundError?
# raise NotFoundError("No such data store")
+
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._path.path)
+
def newTransaction(self, name='no name'):
"""
Create a new transaction.
@@ -101,6 +105,7 @@
self.done = False
self.info = []
+
def __del__(self):
if not self.done and self.info:
print("**** UNCOMMITTED TRANSACTION (%s) BEING GARBAGE COLLECTED ****") % (
@@ -203,9 +208,9 @@
class FileMetaDataMixin(object):
-
+
implements(IDataStoreObject)
-
+
def name(self):
"""
Identify the name of the object
@@ -216,6 +221,7 @@
return self._path.basename()
+
def contentType(self):
"""
The content type of the object's content.
@@ -227,6 +233,7 @@
except KeyError:
return None
+
def md5(self):
"""
The MD5 hex digest of this object's content.
@@ -246,6 +253,7 @@
md5 = hashlib.md5(data).hexdigest()
return md5
+
def size(self):
"""
The octet-size of this object's content.
@@ -257,6 +265,7 @@
else:
return 0
+
def created(self):
"""
The creation date-time stamp of this object.
@@ -268,6 +277,7 @@
else:
return None
+
def modified(self):
"""
The last modification date-time stamp of this object.
Modified: CalendarServer/trunk/txdav/base/datastore/test/__init__.py
===================================================================
--- CalendarServer/trunk/txdav/base/datastore/test/__init__.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/datastore/test/__init__.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -13,4 +13,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
##
-
Modified: CalendarServer/trunk/txdav/base/datastore/test/test_subpostgres.py
===================================================================
--- CalendarServer/trunk/txdav/base/datastore/test/test_subpostgres.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/datastore/test/test_subpostgres.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -20,7 +20,7 @@
from twisted.trial.unittest import TestCase
-# NOTE: This import will fail eventuall when this functionality is added to
+# NOTE: This import will fail eventually when this functionality is added to
# MemoryReactor:
from twisted.runner.test.test_procmon import DummyProcessReactor
@@ -88,6 +88,7 @@
values = cursor.fetchall()
self.assertEquals(values, [["dummy"]])
+
@inlineCallbacks
def test_startService_Socket(self):
"""
@@ -127,7 +128,7 @@
SimpleService2,
"create table TEST_DUMMY_TABLE (stub varchar)",
databaseName="dummy_db",
- listenAddresses=['127.0.0.1',],
+ listenAddresses=['127.0.0.1', ],
testMode=True
)
svc.startService()
@@ -139,6 +140,7 @@
values = cursor.fetchall()
self.assertEquals(values, [["dummy"]])
+
@inlineCallbacks
def test_startService_withDumpFile(self):
"""
@@ -190,8 +192,9 @@
cursor = connection.cursor()
cursor.execute("select * from import_test_table")
values = cursor.fetchall()
- self.assertEquals(values, [["value1"],["value2"]])
+ self.assertEquals(values, [["value1"], ["value2"]])
+
def test_startDatabaseRunning(self):
""" Ensure that if we can connect to postgres we don't spawn pg_ctl """
@@ -199,7 +202,7 @@
class DummyCursor(object):
def __init__(self, historyHolder):
- self.historyHolder = historyHolder
+ self.historyHolder = historyHolder
def execute(self, *args):
self.historyHolder.cursorHistory.append(args)
Modified: CalendarServer/trunk/txdav/base/datastore/util.py
===================================================================
--- CalendarServer/trunk/txdav/base/datastore/util.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/datastore/util.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -53,6 +53,7 @@
return inner
+
class QueryCacher(Memcacher):
"""
A Memcacher for the object-with-name query (more to come)
@@ -62,9 +63,11 @@
super(QueryCacher, self).__init__(cachePool, pickle=True)
self.cacheExpireSeconds = cacheExpireSeconds
+
def set(self, key, value):
return super(QueryCacher, self).set(key, value, expireTime=self.cacheExpireSeconds)
+
def delete(self, key):
return super(QueryCacher, self).delete(key)
@@ -72,6 +75,7 @@
def setAfterCommit(self, transaction, key, value):
transaction.postCommit(lambda: self.set(key, value))
+
def invalidateAfterCommit(self, transaction, key):
# Invalidate now (so that operations within this transaction see it)
# and *also* post-commit (because there could be a scheduled setAfterCommit
@@ -79,16 +83,19 @@
transaction.postCommit(lambda: self.delete(key))
return self.delete(key)
+
# Home child objects by name
def keyForObjectWithName(self, homeResourceID, name):
return "objectWithName:%s:%s" % (homeResourceID, name)
+
# Home metadata (Created/Modified)
def keyForHomeMetaData(self, homeResourceID):
return "homeMetaData:%s" % (homeResourceID)
+
# HomeChild metadata (Created/Modified (and SUPPORTED_COMPONENTS))
def keyForHomeChildMetaData(self, resourceID):
@@ -139,6 +146,3 @@
return uuu + normalForm
else:
return normalForm
-
-
-
Modified: CalendarServer/trunk/txdav/base/propertystore/none.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/none.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/none.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -39,20 +39,25 @@
validKey(key)
raise KeyError(key)
+
def __setitem__(self, key, value):
validKey(key)
raise PropertyChangeNotAllowedError("Property store is read-only.", (key,))
+
def __delitem__(self, key):
validKey(key)
raise KeyError(key)
+
def keys(self):
return ()
+
def _removeResource(self):
pass
+
#
# I/O
#
@@ -60,5 +65,6 @@
def flush(self):
return None
+
def abort(self):
return None
Modified: CalendarServer/trunk/txdav/base/propertystore/test/base.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/test/base.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/test/base.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -69,11 +69,14 @@
self.assertRaises(TypeError, doContains)
+
class PropertyStoreTest(NonePropertyStoreTest):
# Subclass must define self.propertyStore in setUp().
def _changed(self, store):
store.flush()
+
+
def _abort(self, store):
store.abort()
@@ -304,9 +307,13 @@
yield self._changed(self.propertyStore1)
self.failUnless(name in self.propertyStore2.keys())
-
+
+
+
def propertyName(name):
return PropertyName("http://calendarserver.org/ns/test/", name)
+
+
def propertyValue(value):
return davxml.ResponseDescription(value)
Modified: CalendarServer/trunk/txdav/base/propertystore/test/test_appledouble.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/test/test_appledouble.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/test/test_appledouble.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -129,6 +129,3 @@
sample.setContent("")
props = PropertyStore("nobody", lambda : sample)
self.assertEqual(props.attrs, {})
-
-
-
Modified: CalendarServer/trunk/txdav/base/propertystore/test/test_base.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/test/test_base.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/test/test_base.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -34,18 +34,21 @@
except BrokenMethodImplementation, e:
self.fail(e)
+
def test_init(self):
name = PropertyName("http://calendarserver.org/", "bleargh")
self.assertEquals(name.namespace, "http://calendarserver.org/")
self.assertEquals(name.name, "bleargh")
+
def test_fromString(self):
name = PropertyName.fromString("{http://calendarserver.org/}bleargh")
self.assertEquals(name.namespace, "http://calendarserver.org/")
self.assertEquals(name.name, "bleargh")
+
def test_toString(self):
name = PropertyName("http://calendarserver.org/", "bleargh")
Modified: CalendarServer/trunk/txdav/base/propertystore/test/test_none.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/test/test_none.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/test/test_none.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -26,25 +26,32 @@
class PropertyStoreTest(base.NonePropertyStoreTest):
+
def setUp(self):
self.propertyStore = PropertyStore("user01")
+
def test_set(self):
def doSet():
self.propertyStore[propertyName("foo")] = propertyValue("bar")
self.assertRaises(PropertyChangeNotAllowedError, doSet)
+
def test_get(self):
self.assertRaises(KeyError, lambda: self.propertyStore[propertyName("foo")])
+
def test_len(self):
self.assertEquals(len(self.propertyStore), 0)
+
def test_keys(self):
self.assertEquals(self.propertyStore.keys(), ())
+
def test_flush(self):
self.propertyStore.flush()
+
def test_abort(self):
self.propertyStore.abort()
Modified: CalendarServer/trunk/txdav/base/propertystore/test/test_xattr.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/test/test_xattr.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/test/test_xattr.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -31,7 +31,9 @@
importErrorMessage = str(e)
+
class PropertyStoreTest(base.PropertyStoreTest):
+
def setUp(self):
tempDir = FilePath(self.mktemp())
tempDir.makedirs()
@@ -42,18 +44,21 @@
self.propertyStore2 = PropertyStore("user01", lambda : tempFile)
self.propertyStore2._setPerUserUID("user02")
+
def test_init(self):
store = self.propertyStore
self.failUnless(isinstance(store.attrs, xattr))
self.assertEquals(store.removed, set())
self.assertEquals(store.modified, {})
+
def test_abort(self):
super(PropertyStoreTest, self).test_abort()
store = self.propertyStore
self.assertEquals(store.removed, set())
self.assertEquals(store.modified, {})
+
def test_compress(self):
class DummyProperty (WebDAVTextElement):
@@ -70,6 +75,7 @@
self.assertTrue(compressedKey in self.propertyStore.attrs)
self.assertFalse(uncompressedKey in self.propertyStore.attrs)
+
def test_compress_upgrade(self):
class DummyProperty (WebDAVTextElement):
@@ -82,6 +88,7 @@
self.assertEqual(self.propertyStore[name], DummyProperty.fromString("data"))
self.assertRaises(KeyError, lambda: self.propertyStore.attrs[uncompressedKey])
+
def test_copy(self):
tempDir = FilePath(self.mktemp())
@@ -90,7 +97,7 @@
tempFile1.touch()
tempFile2 = tempDir.child("test2")
tempFile2.touch()
-
+
# Existing store
store1_user1 = PropertyStore("user01", lambda : tempFile1)
store1_user2 = PropertyStore("user01", lambda : tempFile1)
@@ -131,7 +138,7 @@
# Do copy and check results
store2_user1.copyAllProperties(store1_user1)
store2_user1.flush()
-
+
self.assertEqual(store1_user1.attrs.items(), store2_user1.attrs.items())
self.assertEqual(store1_user2.attrs.items(), store2_user2.attrs.items())
Modified: CalendarServer/trunk/txdav/base/propertystore/xattr.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/xattr.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/base/propertystore/xattr.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -36,7 +36,7 @@
from txdav.xml.base import encodeXMLName
from txdav.xml.parser import WebDAVDocument
-from txdav.base.propertystore.base import AbstractPropertyStore, PropertyName,\
+from txdav.base.propertystore.base import AbstractPropertyStore, PropertyName, \
validKey
from txdav.idav import PropertyStoreError
@@ -53,6 +53,7 @@
_ERRNO_NO_ATTR = errno.ENODATA
+
class PropertyStore(AbstractPropertyStore):
"""
Property store using filesystem extended attributes.
@@ -71,16 +72,16 @@
# compress/expand overly long namespaces to help stay under that limit now
# that GUIDs are also encoded in the keys.
_namespaceCompress = {
- "urn:ietf:params:xml:ns:caldav" :"CALDAV:",
- "urn:ietf:params:xml:ns:carddav" :"CARDDAV:",
- "http://calendarserver.org/ns/" :"CS:",
- "http://cal.me.com/_namespace/" :"ME:",
- "http://twistedmatrix.com/xml_namespace/dav/" :"TD:",
- "http://twistedmatrix.com/xml_namespace/dav/private/" :"TDP:",
+ "urn:ietf:params:xml:ns:caldav": "CALDAV:",
+ "urn:ietf:params:xml:ns:carddav": "CARDDAV:",
+ "http://calendarserver.org/ns/": "CS:",
+ "http://cal.me.com/_namespace/": "ME:",
+ "http://twistedmatrix.com/xml_namespace/dav/": "TD:",
+ "http://twistedmatrix.com/xml_namespace/dav/private/": "TDP:",
}
_namespaceExpand = dict(
- [ (v, k) for k, v in _namespaceCompress.iteritems() ]
+ [(v, k) for k, v in _namespaceCompress.iteritems()]
)
def __init__(self, defaultuser, pathFactory):
@@ -102,13 +103,16 @@
def path(self):
return self._pathFactory()
+
@property
def attrs(self):
return xattr(self.path.path)
+
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.path.path)
+
def _encodeKey(self, effective, compressNamespace=True):
qname, uid = effective
@@ -123,6 +127,7 @@
r = self.deadPropertyXattrPrefix + result
return r
+
def _decodeKey(self, name):
name = urllib.unquote(name[len(self.deadPropertyXattrPrefix):])
@@ -130,7 +135,7 @@
index1 = name.find("{")
index2 = name.find("}")
- if (index1 is - 1 or index2 is - 1 or not len(name) > index2):
+ if (index1 is -1 or index2 is -1 or not len(name) > index2):
raise ValueError("Invalid encoded name: %r" % (name,))
if index1 == 0:
uid = self._defaultUser
@@ -142,6 +147,7 @@
return PropertyName(propnamespace, propname), uid
+
#
# Required implementations
#
@@ -226,6 +232,7 @@
return doc.root_element
+
def _setitem_uid(self, key, value, uid):
validKey(key)
effectiveKey = (key, uid)
@@ -234,6 +241,7 @@
self.removed.remove(effectiveKey)
self.modified[effectiveKey] = value
+
def _delitem_uid(self, key, uid):
validKey(key)
effectiveKey = (key, uid)
@@ -245,6 +253,7 @@
self.removed.add(effectiveKey)
+
def _keys_uid(self, uid):
seen = set()
@@ -267,12 +276,14 @@
if effectivekey[1] == uid and effectivekey not in seen:
yield effectivekey[0]
+
def _removeResource(self):
# xattrs are removed when the underlying file is deleted so just clear
# out cached changes
self.removed.clear()
self.modified.clear()
+
#
# I/O
#
@@ -310,10 +321,12 @@
self.removed.clear()
self.modified.clear()
+
def abort(self):
self.removed.clear()
self.modified.clear()
+
def copyAllProperties(self, other):
"""
Copy all the properties from another store into this one. This needs to be done
Modified: CalendarServer/trunk/txdav/carddav/datastore/index_file.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/index_file.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/carddav/datastore/index_file.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -38,7 +38,7 @@
from twisted.internet.defer import maybeDeferred
from twistedcaldav import carddavxml
-from txdav.common.icommondatastore import SyncTokenValidException,\
+from txdav.common.icommondatastore import SyncTokenValidException, \
ReservationError
from twistedcaldav.query import addressbookquery
from twistedcaldav.sql import AbstractSQLDatabase
@@ -61,16 +61,19 @@
return _
+
class MemcachedUIDReserver(CachePoolUserMixIn, LoggingMixIn):
def __init__(self, index, cachePool=None):
self.index = index
self._cachePool = cachePool
+
def _key(self, uid):
return 'reservation:%s' % (
hashlib.md5('%s:%s' % (uid,
self.index.resource.fp.path)).hexdigest())
+
def reserveUID(self, uid):
uid = uid.encode('utf-8')
self.log_debug("Reserving UID %r @ %r" % (
@@ -104,7 +107,7 @@
% (uid, self.index.resource)
)
- d =self.getCachePool().delete(self._key(uid))
+ d = self.getCachePool().delete(self._key(uid))
d.addCallback(_handleFalse)
return d
@@ -131,6 +134,7 @@
def __init__(self, index):
self.index = index
+
@wrapInDeferred
def reserveUID(self, uid):
"""
@@ -153,6 +157,7 @@
self.index._db_rollback()
raise
+
def unreserveUID(self, uid):
"""
Unreserve a UID for this index's resource.
@@ -194,7 +199,7 @@
# Double check that the time is within a reasonable period of now
# otherwise we probably have a stale reservation
tm = time.strptime(attime[:19], "%Y-%m-%d %H:%M:%S")
- dt = datetime.datetime(year=tm.tm_year, month=tm.tm_mon, day=tm.tm_mday, hour=tm.tm_hour, minute=tm.tm_min, second = tm.tm_sec)
+ dt = datetime.datetime(year=tm.tm_year, month=tm.tm_mon, day=tm.tm_mday, hour=tm.tm_hour, minute=tm.tm_min, second=tm.tm_sec)
if datetime.datetime.now() - dt > datetime.timedelta(seconds=config.UIDReservationTimeOut):
try:
self.index._db_execute("delete from RESERVED where UID = :1", uid)
@@ -209,6 +214,8 @@
return False
+
+
class AddressBookIndex(AbstractSQLDatabase):
"""
AddressBook collection index abstract base class that defines the apis for the index.
@@ -233,12 +240,14 @@
else:
self.reserver = SQLUIDReserver(self)
+
def create(self):
"""
Create the index and initialize it.
"""
self._db()
+
def recreate(self):
"""
Delete the database and re-create it
@@ -249,20 +258,24 @@
pass
self.create()
+
#
# A dict of sets. The dict keys are address book collection paths,
# and the sets contains reserved UIDs for each path.
#
-
+
def reserveUID(self, uid):
return self.reserver.reserveUID(uid)
-
+
+
def unreserveUID(self, uid):
return self.reserver.unreserveUID(uid)
-
+
+
def isReservedUID(self, uid):
return self.reserver.isReservedUID(uid)
-
+
+
def isAllowedUID(self, uid, *names):
"""
Checks to see whether to allow an operation which would add the
@@ -276,7 +289,8 @@
"""
rname = self.resourceNameForUID(uid)
return (rname is None or rname in names)
-
+
+
def resourceNamesForUID(self, uid):
"""
Looks up the names of the resources with the given UID.
@@ -302,6 +316,7 @@
return resources
+
def resourceNameForUID(self, uid):
"""
Looks up the name of the resource with the given UID.
@@ -313,9 +328,10 @@
for name in self.resourceNamesForUID(uid):
assert result is None, "More than one resource with UID %s in address book collection %r" % (uid, self)
result = name
-
+
return result
+
def resourceUIDForName(self, name):
"""
Looks up the UID of the resource with the given name.
@@ -327,6 +343,7 @@
return uid
+
def addResource(self, name, vcard, fast=False):
"""
Adding or updating an existing resource.
@@ -345,6 +362,7 @@
if not fast:
self._db_commit()
+
def deleteResource(self, name):
"""
Remove this resource from the index.
@@ -355,7 +373,8 @@
if uid is not None:
self._delete_from_db(name, uid)
self._db_commit()
-
+
+
def resourceExists(self, name):
"""
Determines whether the specified resource name exists in the index.
@@ -364,7 +383,8 @@
"""
uid = self._db_value_for_sql("select UID from RESOURCE where NAME = :1", name)
return uid is not None
-
+
+
def resourcesExist(self, names):
"""
Determines whether the specified resource name exists in the index.
@@ -379,12 +399,13 @@
statement += ")"
results = self._db_values_for_sql(statement, *names)
return results
-
+
+
def whatchanged(self, revision):
results = [(name.encode("utf-8"), deleted) for name, deleted in self._db_execute("select NAME, DELETED from REVISIONS where REVISION > :1", revision)]
- results.sort(key=lambda x:x[1])
-
+ results.sort(key=lambda x: x[1])
+
changed = []
deleted = []
for name, wasdeleted in results:
@@ -396,14 +417,16 @@
changed.append(name)
else:
raise SyncTokenValidException
-
+
return changed, deleted,
+
def lastRevision(self):
return self._db_value_for_sql(
"select REVISION from REVISION_SEQUENCE"
)
+
def bumpRevision(self, fast=False):
self._db_execute(
"""
@@ -418,14 +441,16 @@
""",
)
+
def searchValid(self, filter):
if isinstance(filter, carddavxml.Filter):
qualifiers = addressbookquery.sqladdressbookquery(filter)
else:
qualifiers = None
-
+
return qualifiers is not None
+
def search(self, filter):
"""
Finds resources matching the given qualifiers.
@@ -437,7 +462,7 @@
"""
# FIXME: Don't forget to use maximum_future_expansion_duration when we
# start caching...
-
+
# Make sure we have a proper Filter element and get the partial SQL statement to use.
if isinstance(filter, carddavxml.Filter):
qualifiers = addressbookquery.sqladdressbookquery(filter)
@@ -447,7 +472,7 @@
rowiter = self._db_execute("select DISTINCT RESOURCE.NAME, RESOURCE.UID" + qualifiers[0], *qualifiers[1])
else:
rowiter = self._db_execute("select NAME, UID from RESOURCE")
-
+
for row in rowiter:
name = row[0]
if self.resource.getChild(name.encode("utf-8")):
@@ -457,6 +482,7 @@
% (name, self.resource))
self.deleteResource(name, None)
+
def bruteForceSearch(self):
"""
List the whole index and tests for existence, updating the index
@@ -482,22 +508,25 @@
@return: the schema version assigned to this index.
"""
return schema_version
-
+
+
def _db_type(self):
"""
@return: the collection type assigned to this index.
"""
return "AddressBook"
-
+
+
def _db_init_data_tables(self, q):
"""
Initialise the underlying database tables.
@param q: a database cursor to use.
"""
-
+
# Create database where the RESOURCE table has unique UID column.
self._db_init_data_tables_base(q, True)
+
def _db_init_data_tables_base(self, q, uidunique):
"""
Initialise the underlying database tables.
@@ -564,11 +593,12 @@
"""
)
+
def _db_recreate(self, do_commit=True):
"""
Re-create the database tables from existing address book data.
"""
-
+
#
# Populate the DB with data from already existing resources.
# This allows for index recovery if the DB file gets
@@ -598,19 +628,21 @@
self.addResource(name, vcard, True)
finally:
stream.close()
-
+
# Do commit outside of the loop for better performance
if do_commit:
self._db_commit()
+
def _db_can_upgrade(self, old_version):
"""
Can we do an in-place upgrade
"""
-
+
# v2 is a minor change
return True
+
def _db_upgrade_data_tables(self, q, old_version):
"""
Upgrade the data from an older version of the DB.
@@ -645,16 +677,16 @@
create index REVISION on REVISIONS (REVISION)
"""
)
-
+
self._db_execute(
"""
insert into REVISIONS (NAME)
select NAME from RESOURCE
"""
)
-
- def _add_to_db(self, name, vcard, cursor = None):
+
+ def _add_to_db(self, name, vcard, cursor=None):
"""
Records the given address book resource in the index with the given name.
Resource names and UIDs must both be unique; only one resource name may
@@ -680,7 +712,8 @@
values (:1, :2, :3)
""", name, self.bumpRevision(fast=True), 'N',
)
-
+
+
def _delete_from_db(self, name, uid, dorevision=True):
"""
Deletes the specified entry from all dbs.
Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_index_file.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/test/test_index_file.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_index_file.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -18,7 +18,7 @@
from twisted.internet.task import deferLater
from txdav.common.icommondatastore import ReservationError
-from txdav.carddav.datastore.index_file import AddressBookIndex,\
+from txdav.carddav.datastore.index_file import AddressBookIndex, \
MemcachedUIDReserver
from twistedcaldav.test.util import InMemoryMemcacheProtocol
@@ -150,6 +150,7 @@
for description, name, vcard_txt in data:
self.assertTrue(self.db.resourceExists(name), msg=description)
+
def test_index_revisions(self):
data1 = """BEGIN:VCARD
VERSION:3.0
@@ -188,17 +189,19 @@
self.db.deleteResource("data3.vcf")
tests = (
- (0, (["data1.vcf", "data2.vcf",], [],)),
- (1, (["data2.vcf",], ["data3.vcf",],)),
- (2, ([], ["data3.vcf",],)),
- (3, ([], ["data3.vcf",],)),
+ (0, (["data1.vcf", "data2.vcf", ], [],)),
+ (1, (["data2.vcf", ], ["data3.vcf", ],)),
+ (2, ([], ["data3.vcf", ],)),
+ (3, ([], ["data3.vcf", ],)),
(4, ([], [],)),
(5, ([], [],)),
)
-
+
for revision, results in tests:
self.assertEquals(self.db.whatchanged(revision), results, "Mismatched results for whatchanged with revision %d" % (revision,))
+
+
class MemcacheTests(SQLIndexTests):
def setUp(self):
super(MemcacheTests, self).setUp()
Modified: CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/carddav/datastore/test/test_sql.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -54,7 +54,6 @@
AddressBook SQL storage tests.
"""
-
@inlineCallbacks
def setUp(self):
yield super(AddressBookSQLStorageTests, self).setUp()
@@ -97,7 +96,7 @@
events).
"""
@inlineCallbacks
- def namesAndComponents(x, filter=lambda x:x.component()):
+ def namesAndComponents(x, filter=lambda x: x.component()):
fromObjs = yield x.addressbookObjects()
returnValue(dict([(fromObj.name(), (yield filter(fromObj)))
for fromObj in fromObjs]))
@@ -113,7 +112,7 @@
"""
Assert that two objects with C{properties} methods have similar
properties.
-
+
@param disregard: a list of L{PropertyName} keys to discard from both
input and output.
"""
@@ -607,7 +606,6 @@
foreignMemberRows = yield Select([aboForeignMembers.GROUP_ID, aboForeignMembers.MEMBER_ADDRESS], From=aboForeignMembers).on(txn)
self.assertEqual(foreignMemberRows, [])
-
yield subgroupObject.remove()
memberRows = yield Select([aboMembers.GROUP_ID, aboMembers.MEMBER_ID], From=aboMembers,).on(txn)
self.assertEqual(memberRows, [])
@@ -621,7 +619,6 @@
yield txn.commit()
-
@inlineCallbacks
def test_removeAddressBookPropertiesOnDelete(self):
"""
@@ -856,7 +853,6 @@
yield self.commit()
-
@inlineCallbacks
def test_objectResourceWithID(self):
"""
@@ -953,5 +949,3 @@
changed, deleted = yield otherHome.resourceNamesSinceRevision(otherAB._bindRevision, depth)
self.assertEqual(len(changed), 0)
self.assertEqual(len(deleted), 0)
-
-
Modified: CalendarServer/trunk/txdav/carddav/datastore/util.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/util.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/carddav/datastore/util.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -82,7 +82,7 @@
outAddressbook.properties().update(inAddressbook.properties())
inObjects = yield inAddressbook.addressbookObjects()
for addressbookObject in inObjects:
-
+
try:
component = (yield addressbookObject.component()) # XXX WRONG SHOULD CALL getComponent
component.md5 = addressbookObject.md5()
@@ -90,7 +90,7 @@
addressbookObject.name(),
component,
)
-
+
# Only the owner's properties are migrated, since previous releases of
# addressbook server didn't have per-user properties.
outObject = yield outAddressbook.addressbookObjectWithName(
@@ -98,7 +98,7 @@
if outAddressbook.objectResourcesHaveProperties():
outObject.properties().update(addressbookObject.properties())
- ok_count += 1
+ ok_count += 1
except InternalDataStoreError:
log.error(" InternalDataStoreError: Failed to migrate address book object: %s/%s/%s" % (
@@ -106,7 +106,7 @@
inAddressbook.name(),
addressbookObject.name(),
))
- bad_count += 1
+ bad_count += 1
except Exception, e:
log.error(" %s: Failed to migrate address book object: %s/%s/%s" % (
@@ -115,13 +115,14 @@
inAddressbook.name(),
addressbookObject.name(),
))
- bad_count += 1
+ bad_count += 1
returnValue((ok_count, bad_count,))
+
@inlineCallbacks
-def migrateHome(inHome, outHome, getComponent=lambda x:x.component(),
+def migrateHome(inHome, outHome, getComponent=lambda x: x.component(),
merge=None):
yield outHome.removeAddressBookWithName("addressbook")
outHome.properties().update(inHome.properties())
@@ -134,6 +135,3 @@
yield _migrateAddressbook(addressbook, outAddressbook, getComponent)
except InternalDataStoreError:
log.error(" Failed to migrate address book: %s/%s" % (inHome.name(), name,))
-
-
-
Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -216,7 +216,7 @@
@inlineCallbacks
- def _withEachHomeDo(self, homeTable, homeFromTxn, action, batchSize): #@UnusedVariable
+ def _withEachHomeDo(self, homeTable, homeFromTxn, action, batchSize): #@UnusedVariable
"""
Implementation of L{ICalendarStore.withEachCalendarHomeDo} and
L{IAddressbookStore.withEachAddressbookHomeDo}.
@@ -516,7 +516,7 @@
@classproperty
- def _calendarserver(cls): #@NoSelf
+ def _calendarserver(cls): #@NoSelf
cs = schema.CALENDARSERVER
return Select(
[cs.VALUE, ],
@@ -561,7 +561,7 @@
return self.homeWithUID(EADDRESSBOOKTYPE, uid, create=create)
- def _determineMemo(self, storeType, uid, create=False): #@UnusedVariable
+ def _determineMemo(self, storeType, uid, create=False): #@UnusedVariable
"""
Determine the memo dictionary to use for homeWithUID.
"""
@@ -625,7 +625,7 @@
@classproperty
- def _insertAPNSubscriptionQuery(cls): #@NoSelf
+ def _insertAPNSubscriptionQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Insert({apn.TOKEN: Parameter("token"),
apn.RESOURCE_KEY: Parameter("resourceKey"),
@@ -636,7 +636,7 @@
@classproperty
- def _updateAPNSubscriptionQuery(cls): #@NoSelf
+ def _updateAPNSubscriptionQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Update({apn.MODIFIED: Parameter("modified"),
apn.SUBSCRIBER_GUID: Parameter("subscriber"),
@@ -647,7 +647,7 @@
@classproperty
- def _selectAPNSubscriptionQuery(cls): #@NoSelf
+ def _selectAPNSubscriptionQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Select([apn.MODIFIED, apn.SUBSCRIBER_GUID], From=apn,
Where=(
@@ -691,7 +691,7 @@
@classproperty
- def _removeAPNSubscriptionQuery(cls): #@NoSelf
+ def _removeAPNSubscriptionQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Delete(From=apn,
Where=(apn.TOKEN == Parameter("token")).And(
@@ -704,7 +704,7 @@
@classproperty
- def _purgeOldAPNSubscriptionQuery(cls): #@NoSelf
+ def _purgeOldAPNSubscriptionQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Delete(From=apn,
Where=(apn.MODIFIED < Parameter("olderThan")))
@@ -716,7 +716,7 @@
@classproperty
- def _apnSubscriptionsByTokenQuery(cls): #@NoSelf
+ def _apnSubscriptionsByTokenQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Select([apn.RESOURCE_KEY, apn.MODIFIED, apn.SUBSCRIBER_GUID],
From=apn, Where=apn.TOKEN == Parameter("token"))
@@ -727,7 +727,7 @@
@classproperty
- def _apnSubscriptionsByKeyQuery(cls): #@NoSelf
+ def _apnSubscriptionsByKeyQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Select([apn.TOKEN, apn.SUBSCRIBER_GUID],
From=apn, Where=apn.RESOURCE_KEY == Parameter("resourceKey"))
@@ -738,7 +738,7 @@
@classproperty
- def _apnSubscriptionsBySubscriberQuery(cls): #@NoSelf
+ def _apnSubscriptionsBySubscriberQuery(cls): #@NoSelf
apn = schema.APN_SUBSCRIPTIONS
return Select([apn.TOKEN, apn.RESOURCE_KEY, apn.MODIFIED, apn.USER_AGENT, apn.IP_ADDR],
From=apn, Where=apn.SUBSCRIBER_GUID == Parameter("subscriberGUID"))
@@ -751,7 +751,7 @@
# Create IMIP token
@classproperty
- def _insertIMIPTokenQuery(cls): #@NoSelf
+ def _insertIMIPTokenQuery(cls): #@NoSelf
imip = schema.IMIP_TOKENS
return Insert({imip.TOKEN: Parameter("token"),
imip.ORGANIZER: Parameter("organizer"),
@@ -781,7 +781,7 @@
@classproperty
- def _selectIMIPTokenByTokenQuery(cls): #@NoSelf
+ def _selectIMIPTokenByTokenQuery(cls): #@NoSelf
imip = schema.IMIP_TOKENS
return Select([imip.ORGANIZER, imip.ATTENDEE, imip.ICALUID], From=imip,
Where=(imip.TOKEN == Parameter("token")))
@@ -794,7 +794,7 @@
@classproperty
- def _selectIMIPTokenQuery(cls): #@NoSelf
+ def _selectIMIPTokenQuery(cls): #@NoSelf
imip = schema.IMIP_TOKENS
return Select([imip.TOKEN], From=imip,
Where=(imip.ORGANIZER == Parameter("organizer")).And(
@@ -803,7 +803,7 @@
@classproperty
- def _updateIMIPTokenQuery(cls): #@NoSelf
+ def _updateIMIPTokenQuery(cls): #@NoSelf
imip = schema.IMIP_TOKENS
return Update({imip.ACCESSED: utcNowSQL, },
Where=(imip.ORGANIZER == Parameter("organizer")).And(
@@ -827,7 +827,7 @@
# Remove IMIP token
@classproperty
- def _removeIMIPTokenQuery(cls): #@NoSelf
+ def _removeIMIPTokenQuery(cls): #@NoSelf
imip = schema.IMIP_TOKENS
return Delete(From=imip,
Where=(imip.TOKEN == Parameter("token")))
@@ -839,7 +839,7 @@
# Purge old IMIP tokens
@classproperty
- def _purgeOldIMIPTokensQuery(cls): #@NoSelf
+ def _purgeOldIMIPTokensQuery(cls): #@NoSelf
imip = schema.IMIP_TOKENS
return Delete(From=imip,
Where=(imip.ACCESSED < Parameter("olderThan")))
@@ -1378,15 +1378,15 @@
class _EmptyCacher(object):
- def set(self, key, value): #@UnusedVariable
+ def set(self, key, value): #@UnusedVariable
return succeed(True)
- def get(self, key, withIdentifier=False): #@UnusedVariable
+ def get(self, key, withIdentifier=False): #@UnusedVariable
return succeed(None)
- def delete(self, key): #@UnusedVariable
+ def delete(self, key): #@UnusedVariable
return succeed(True)
@@ -1435,14 +1435,14 @@
@classproperty
- def _resourceIDFromOwnerQuery(cls): #@NoSelf
+ def _resourceIDFromOwnerQuery(cls): #@NoSelf
home = cls._homeSchema
return Select([home.RESOURCE_ID],
From=home, Where=home.OWNER_UID == Parameter("ownerUID"))
@classproperty
- def _ownerFromResourceID(cls): #@NoSelf
+ def _ownerFromResourceID(cls): #@NoSelf
home = cls._homeSchema
return Select([home.OWNER_UID],
From=home,
@@ -1450,7 +1450,7 @@
@classproperty
- def _metaDataQuery(cls): #@NoSelf
+ def _metaDataQuery(cls): #@NoSelf
metadata = cls._homeMetaDataSchema
return Select(cls.metadataColumns(),
From=metadata,
@@ -1768,7 +1768,7 @@
@classproperty
- def _syncTokenQuery(cls): #@NoSelf
+ def _syncTokenQuery(cls): #@NoSelf
"""
DAL Select statement to find the sync token.
@@ -1832,7 +1832,7 @@
@classproperty
- def _changesQuery(cls): #@NoSelf
+ def _changesQuery(cls): #@NoSelf
bind = cls._bindSchema
rev = cls._revisionsSchema
return Select(
@@ -2040,12 +2040,12 @@
@classproperty
- def _resourceByUIDQuery(cls): #@NoSelf
+ def _resourceByUIDQuery(cls): #@NoSelf
return cls._objectResourceQuery(checkBindMode=False)
@classproperty
- def _resourceByUIDBindQuery(cls): #@NoSelf
+ def _resourceByUIDBindQuery(cls): #@NoSelf
return cls._objectResourceQuery(checkBindMode=True)
@@ -2106,7 +2106,7 @@
@classproperty
- def _quotaQuery(cls): #@NoSelf
+ def _quotaQuery(cls): #@NoSelf
meta = cls._homeMetaDataSchema
return Select(
[meta.QUOTA_USED_BYTES], From=meta,
@@ -2123,7 +2123,7 @@
@classproperty
- def _preLockResourceIDQuery(cls): #@NoSelf
+ def _preLockResourceIDQuery(cls): #@NoSelf
meta = cls._homeMetaDataSchema
return Select(From=meta,
Where=meta.RESOURCE_ID == Parameter("resourceID"),
@@ -2131,7 +2131,7 @@
@classproperty
- def _increaseQuotaQuery(cls): #@NoSelf
+ def _increaseQuotaQuery(cls): #@NoSelf
meta = cls._homeMetaDataSchema
return Update({meta.QUOTA_USED_BYTES: meta.QUOTA_USED_BYTES +
Parameter("delta")},
@@ -2140,7 +2140,7 @@
@classproperty
- def _resetQuotaQuery(cls): #@NoSelf
+ def _resetQuotaQuery(cls): #@NoSelf
meta = cls._homeMetaDataSchema
return Update({meta.QUOTA_USED_BYTES: 0},
Where=meta.RESOURCE_ID == Parameter("resourceID"))
@@ -2206,7 +2206,7 @@
@classproperty
- def _lockLastModifiedQuery(cls): #@NoSelf
+ def _lockLastModifiedQuery(cls): #@NoSelf
meta = cls._homeMetaDataSchema
return Select(
From=meta,
@@ -2217,7 +2217,7 @@
@classproperty
- def _changeLastModifiedQuery(cls): #@NoSelf
+ def _changeLastModifiedQuery(cls): #@NoSelf
meta = cls._homeMetaDataSchema
return Update({meta.MODIFIED: utcNowSQL},
Where=meta.RESOURCE_ID == Parameter("resourceID"),
@@ -2284,7 +2284,7 @@
"""
@classproperty
- def _childSyncTokenQuery(cls): #@NoSelf
+ def _childSyncTokenQuery(cls): #@NoSelf
"""
DAL query for retrieving the sync token of a L{CommonHomeChild} based on
its resource ID.
@@ -2317,7 +2317,7 @@
@classproperty
- def _objectNamesSinceRevisionQuery(cls): #@NoSelf
+ def _objectNamesSinceRevisionQuery(cls): #@NoSelf
"""
DAL query for (resource, deleted-flag)
"""
@@ -2371,7 +2371,7 @@
@classproperty
- def _removeDeletedRevision(cls): #@NoSelf
+ def _removeDeletedRevision(cls): #@NoSelf
rev = cls._revisionsSchema
return Delete(From=rev,
Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
@@ -2379,7 +2379,7 @@
@classproperty
- def _addNewRevision(cls): #@NoSelf
+ def _addNewRevision(cls): #@NoSelf
rev = cls._revisionsSchema
return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
rev.RESOURCE_ID: Parameter("resourceID"),
@@ -2404,7 +2404,7 @@
@classproperty
- def _renameSyncTokenQuery(cls): #@NoSelf
+ def _renameSyncTokenQuery(cls): #@NoSelf
"""
DAL query to change sync token for a rename (increment and adjust
resource name).
@@ -2429,7 +2429,7 @@
@classproperty
- def _bumpSyncTokenQuery(cls): #@NoSelf
+ def _bumpSyncTokenQuery(cls): #@NoSelf
"""
DAL query to change collection sync token.
"""
@@ -2452,7 +2452,7 @@
@classproperty
- def _deleteSyncTokenQuery(cls): #@NoSelf
+ def _deleteSyncTokenQuery(cls): #@NoSelf
"""
DAL query to update a sync revision to be a tombstone instead.
"""
@@ -2466,7 +2466,7 @@
@classproperty
- def _sharedRemovalQuery(cls): #@NoSelf
+ def _sharedRemovalQuery(cls): #@NoSelf
"""
DAL query to update the sync token for a shared collection.
"""
@@ -2481,7 +2481,7 @@
@classproperty
- def _unsharedRemovalQuery(cls): #@NoSelf
+ def _unsharedRemovalQuery(cls): #@NoSelf
"""
DAL query to update the sync token for an owned collection.
"""
@@ -2528,7 +2528,7 @@
@classproperty
- def _deleteBumpTokenQuery(cls): #@NoSelf
+ def _deleteBumpTokenQuery(cls): #@NoSelf
rev = cls._revisionsSchema
return Update({rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: True},
@@ -2538,7 +2538,7 @@
@classproperty
- def _updateBumpTokenQuery(cls): #@NoSelf
+ def _updateBumpTokenQuery(cls): #@NoSelf
rev = cls._revisionsSchema
return Update({rev.REVISION: schema.REVISION_SEQ},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
@@ -2547,7 +2547,7 @@
@classproperty
- def _insertFindPreviouslyNamedQuery(cls): #@NoSelf
+ def _insertFindPreviouslyNamedQuery(cls): #@NoSelf
rev = cls._revisionsSchema
return Select([rev.RESOURCE_ID], From=rev,
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
@@ -2555,7 +2555,7 @@
@classproperty
- def _updatePreviouslyNamedQuery(cls): #@NoSelf
+ def _updatePreviouslyNamedQuery(cls): #@NoSelf
rev = cls._revisionsSchema
return Update({rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: False},
@@ -2565,7 +2565,7 @@
@classproperty
- def _completelyNewRevisionQuery(cls): #@NoSelf
+ def _completelyNewRevisionQuery(cls): #@NoSelf
rev = cls._revisionsSchema
return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
rev.RESOURCE_ID: Parameter("resourceID"),
@@ -2632,7 +2632,7 @@
"""
@classproperty
- def _bindInsertQuery(cls, **kw): #@NoSelf #@UnusedVariable
+ def _bindInsertQuery(cls, **kw): #@NoSelf #@UnusedVariable
"""
DAL statement to create a bind entry that connects a collection to its
home.
@@ -2649,7 +2649,7 @@
@classmethod
- def _updateBindColumnsQuery(cls, columnMap): #@NoSelf
+ def _updateBindColumnsQuery(cls, columnMap): #@NoSelf
bind = cls._bindSchema
return Update(columnMap,
Where=(bind.RESOURCE_ID == Parameter("resourceID"))
@@ -2658,7 +2658,7 @@
@classproperty
- def _updateBindQuery(cls): #@NoSelf
+ def _updateBindQuery(cls): #@NoSelf
bind = cls._bindSchema
return cls._updateBindColumnsQuery(
{bind.BIND_MODE: Parameter("mode"),
@@ -2667,7 +2667,7 @@
@classproperty
- def _deleteBindForResourceIDAndHomeID(cls): #@NoSelf
+ def _deleteBindForResourceIDAndHomeID(cls): #@NoSelf
bind = cls._bindSchema
return Delete(
From=bind,
@@ -2678,7 +2678,7 @@
@classmethod
- def _bindFor(cls, condition): #@NoSelf
+ def _bindFor(cls, condition): #@NoSelf
bind = cls._bindSchema
columns = cls.bindColumns() + cls.additionalBindColumns()
return Select(
@@ -2689,22 +2689,23 @@
@classproperty
- def _sharedBindForResourceID(cls): #@NoSelf
+ def _sharedBindForResourceID(cls): #@NoSelf
bind = cls._bindSchema
return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
.And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED)
.And(bind.BIND_MODE != _BIND_MODE_OWN)
)
+
@classproperty
- def _acceptedBindForHomeID(cls): #@NoSelf
+ def _acceptedBindForHomeID(cls): #@NoSelf
bind = cls._bindSchema
return cls._bindFor((bind.HOME_RESOURCE_ID == Parameter("homeID"))
.And(bind.BIND_STATUS == _BIND_STATUS_ACCEPTED))
@classproperty
- def _unacceptedBindForResourceID(cls): #@NoSelf
+ def _unacceptedBindForResourceID(cls): #@NoSelf
bind = cls._bindSchema
return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
.And(bind.BIND_STATUS != _BIND_STATUS_ACCEPTED)
@@ -2712,7 +2713,7 @@
@classproperty
- def _bindForResourceIDAndHomeID(cls): #@NoSelf
+ def _bindForResourceIDAndHomeID(cls): #@NoSelf
"""
DAL query that looks up home bind rows by home child
resource ID and home resource ID.
@@ -2724,7 +2725,7 @@
@classproperty
- def _bindForNameAndHomeID(cls): #@NoSelf
+ def _bindForNameAndHomeID(cls): #@NoSelf
"""
DAL query that looks up any bind rows by home child
resource ID and home resource ID.
@@ -2953,7 +2954,7 @@
result = []
for row in acceptedRows:
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:self.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:self.bindColumnCount] #@UnusedVariable
home = yield self._txn.homeWithResourceID(self._home._homeType, homeID)
new = yield home.objectWithShareUID(bindName)
result.append(new)
@@ -2982,7 +2983,7 @@
result = []
for row in rows:
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:self.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:self.bindColumnCount] #@UnusedVariable
home = yield self._txn.homeWithResourceID(self._home._homeType, homeID)
new = yield home.invitedObjectWithShareUID(bindName)
result.append(new)
@@ -3151,8 +3152,9 @@
return ()
+
@classproperty
- def _childrenAndMetadataForHomeID(cls): #@NoSelf
+ def _childrenAndMetadataForHomeID(cls): #@NoSelf
bind = cls._bindSchema
child = cls._homeChildSchema
childMetaData = cls._homeChildMetaDataSchema
@@ -3241,7 +3243,7 @@
self._index = None # Derived classes need to set this
- def memoMe(self, key, memo): #@UnusedVariable
+ def memoMe(self, key, memo): #@UnusedVariable
"""
Add this object to the memo dictionary in whatever fashion is appropriate.
@@ -3299,7 +3301,7 @@
# Create the actual objects merging in properties
for dataRow in dataRows:
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
additionalBind = dataRow[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
metadata = dataRow[cls.bindColumnCount + len(cls.additionalBindColumns()):]
@@ -3368,7 +3370,7 @@
returnValue(None)
row = rows[0]
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
if (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
returnValue(None)
additionalBind = row[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
@@ -3409,7 +3411,7 @@
returnValue(None)
row = rows[0]
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable]
+ bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable]
if (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
returnValue(None)
@@ -3420,7 +3422,7 @@
@classproperty
- def _insertHomeChild(cls): #@NoSelf
+ def _insertHomeChild(cls): #@NoSelf
"""
DAL statement to create a home child with all default values.
"""
@@ -3430,7 +3432,7 @@
@classproperty
- def _insertHomeChildMetaData(cls): #@NoSelf
+ def _insertHomeChildMetaData(cls): #@NoSelf
"""
DAL statement to create a home child with all default values.
"""
@@ -3480,7 +3482,7 @@
@classproperty
- def _metadataByIDQuery(cls): #@NoSelf
+ def _metadataByIDQuery(cls): #@NoSelf
"""
DAL query to retrieve created/modified dates based on a resource ID.
"""
@@ -3561,7 +3563,7 @@
@classproperty
- def _renameQuery(cls): #@NoSelf
+ def _renameQuery(cls): #@NoSelf
"""
DAL statement to rename a L{CommonHomeChild}
"""
@@ -3600,7 +3602,7 @@
@classproperty
- def _deleteQuery(cls): #@NoSelf
+ def _deleteQuery(cls): #@NoSelf
"""
DAL statement to delete a L{CommonHomeChild} by its resource ID.
"""
@@ -3650,7 +3652,7 @@
@classproperty
- def _ownerHomeWithResourceID(cls): #@NoSelf
+ def _ownerHomeWithResourceID(cls): #@NoSelf
"""
DAL query to retrieve the home resource ID and resource name of the owner from the bound
home-child ID.
@@ -3691,7 +3693,7 @@
@classproperty
- def _objectResourceNamesQuery(cls): #@NoSelf
+ def _objectResourceNamesQuery(cls): #@NoSelf
"""
DAL query to load all object resource names for a home child.
"""
@@ -3710,7 +3712,7 @@
@classproperty
- def _objectCountQuery(cls): #@NoSelf
+ def _objectCountQuery(cls): #@NoSelf
"""
DAL query to count all object resources for a home child.
"""
@@ -3776,7 +3778,7 @@
@classproperty
- def _resourceNameForUIDQuery(cls): #@NoSelf
+ def _resourceNameForUIDQuery(cls): #@NoSelf
"""
DAL query to retrieve the resource name for an object resource based on
its UID column.
@@ -3805,7 +3807,7 @@
@classproperty
- def _resourceUIDForNameQuery(cls): #@NoSelf
+ def _resourceUIDForNameQuery(cls): #@NoSelf
"""
DAL query to retrieve the UID for an object resource based on its
resource name column.
@@ -3873,7 +3875,7 @@
@classproperty
- def _moveParentUpdateQuery(cls, adjustName=False): #@NoSelf
+ def _moveParentUpdateQuery(cls, adjustName=False): #@NoSelf
"""
DAL query to update a child to be in a new parent.
"""
@@ -3889,7 +3891,7 @@
)
- def _movedObjectResource(self, child, newparent): #@UnusedVariable
+ def _movedObjectResource(self, child, newparent): #@UnusedVariable
"""
Method that subclasses can override to do an extra DB adjustments when a resource
is moved.
@@ -4085,7 +4087,7 @@
@classproperty
- def _lockLastModifiedQuery(cls): #@NoSelf
+ def _lockLastModifiedQuery(cls): #@NoSelf
schema = cls._homeChildMetaDataSchema
return Select(
From=schema,
@@ -4096,7 +4098,7 @@
@classproperty
- def _changeLastModifiedQuery(cls): #@NoSelf
+ def _changeLastModifiedQuery(cls): #@NoSelf
schema = cls._homeChildMetaDataSchema
return Update({schema.MODIFIED: utcNowSQL},
Where=schema.RESOURCE_ID == Parameter("resourceID"),
@@ -4145,7 +4147,7 @@
BATCH_LOAD_SIZE = 50
- def __init__(self, parent, name, uid, resourceID=None, options=None): #@UnusedVariable
+ def __init__(self, parent, name, uid, resourceID=None, options=None): #@UnusedVariable
self._parentCollection = parent
self._resourceID = resourceID
self._name = name
@@ -4160,11 +4162,12 @@
@classproperty
- def _allColumnsWithParentQuery(cls): #@NoSelf
+ def _allColumnsWithParentQuery(cls): #@NoSelf
obj = cls._objectSchema
return Select(cls._allColumns, From=obj,
Where=obj.PARENT_RESOURCE_ID == Parameter("parentID"))
+
@classmethod
@inlineCallbacks
def _allColumnsWithParent(cls, parent):
@@ -4242,6 +4245,7 @@
returnValue((yield cls._allColumnsWithParentAndNamesQuery(names).on(
parent._txn, parentID=parent._resourceID, names=names)))
+
@classmethod
@inlineCallbacks
def _loadAllObjectsWithNames(cls, parent, names):
@@ -4332,17 +4336,17 @@
@classproperty
- def _allColumnsWithParentAndName(cls): #@NoSelf
+ def _allColumnsWithParentAndName(cls): #@NoSelf
return cls._allColumnsWithParentAnd(cls._objectSchema.RESOURCE_NAME, "name")
@classproperty
- def _allColumnsWithParentAndUID(cls): #@NoSelf
+ def _allColumnsWithParentAndUID(cls): #@NoSelf
return cls._allColumnsWithParentAnd(cls._objectSchema.UID, "uid")
@classproperty
- def _allColumnsWithParentAndID(cls): #@NoSelf
+ def _allColumnsWithParentAndID(cls): #@NoSelf
return cls._allColumnsWithParentAnd(cls._objectSchema.RESOURCE_ID, "resourceID")
@@ -4378,7 +4382,7 @@
@classproperty
- def _allColumns(cls): #@NoSelf
+ def _allColumns(cls): #@NoSelf
"""
Full set of columns in the object table that need to be loaded to
initialize the object resource state.
@@ -4437,6 +4441,7 @@
@param props: the L{PropertyStore} from C{properties()}.
"""
+ pass
def __repr__(self):
@@ -4476,7 +4481,7 @@
@classmethod
- def _selectForUpdateQuery(cls, nowait): #@NoSelf
+ def _selectForUpdateQuery(cls, nowait): #@NoSelf
"""
DAL statement to lock a L{CommonObjectResource} by its resource ID.
"""
@@ -4520,7 +4525,7 @@
@classproperty
- def _deleteQuery(cls): #@NoSelf
+ def _deleteQuery(cls): #@NoSelf
"""
DAL statement to delete a L{CommonObjectResource} by its resource ID.
"""
@@ -4595,7 +4600,7 @@
@classproperty
- def _textByIDQuery(cls): #@NoSelf
+ def _textByIDQuery(cls): #@NoSelf
"""
DAL query to load iCalendar/vCard text via an object's resource ID.
"""
@@ -4906,7 +4911,7 @@
@classproperty
- def _completelyNewRevisionQuery(cls): #@NoSelf
+ def _completelyNewRevisionQuery(cls): #@NoSelf
rev = cls._revisionsSchema
return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
# rev.RESOURCE_ID: Parameter("resourceID"),
@@ -4977,7 +4982,7 @@
@classproperty
- def _allColumnsByHomeIDQuery(cls): #@NoSelf
+ def _allColumnsByHomeIDQuery(cls): #@NoSelf
"""
DAL query to load all columns by home ID.
"""
@@ -5036,7 +5041,7 @@
@classproperty
- def _oneNotificationQuery(cls): #@NoSelf
+ def _oneNotificationQuery(cls): #@NoSelf
no = cls._objectSchema
return Select(
[
@@ -5077,7 +5082,7 @@
returnValue(None)
- def _loadPropertyStore(self, props=None, created=False): #@UnusedVariable
+ def _loadPropertyStore(self, props=None, created=False): #@UnusedVariable
if props is None:
props = NonePropertyStore(self._home.uid())
self._propertyStore = props
@@ -5115,7 +5120,7 @@
@classproperty
- def _newNotificationQuery(cls): #@NoSelf
+ def _newNotificationQuery(cls): #@NoSelf
no = cls._objectSchema
return Insert(
{
@@ -5130,7 +5135,7 @@
@classproperty
- def _updateNotificationQuery(cls): #@NoSelf
+ def _updateNotificationQuery(cls): #@NoSelf
no = cls._objectSchema
return Update(
{
@@ -5255,6 +5260,7 @@
)
+
@inlineCallbacks
def mergeHomes(sqlTxn, one, other, homeType):
"""
@@ -5313,6 +5319,7 @@
yield returnValue(newer)
+
def _renameHome(txn, table, oldUID, newUID):
"""
Rename a calendar, addressbook, or notification home. Note that this
@@ -5339,13 +5346,16 @@
Where=table.OWNER_UID == oldUID).on(txn)
+
def _dontBotherWithNotifications(older, newer, merge):
"""
Notifications are more transient and can be easily worked around; don't
bother to migrate all of them when there is a UUID case mismatch.
"""
+ pass
+
@inlineCallbacks
def _normalizeHomeUUIDsIn(t, homeType):
"""
@@ -5428,6 +5438,7 @@
returnValue(None)
+
def _getHome(txn, homeType, uid):
"""
Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
@@ -5451,6 +5462,7 @@
return txn.homeWithUID(homeType, uid)
+
@inlineCallbacks
def _normalizeColumnUUIDs(txn, column):
"""
@@ -5498,6 +5510,7 @@
return self
+
@inlineCallbacks
def _needsNormalizationUpgrade(txn):
"""
@@ -5521,6 +5534,7 @@
returnValue(False)
+
@inlineCallbacks
def fixUUIDNormalization(store):
"""
Modified: CalendarServer/trunk/txdav/common/datastore/sql_tables.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_tables.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/common/datastore/sql_tables.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -377,6 +377,7 @@
out.write('\n);\n\n')
+
def splitSQLString(sqlString):
"""
Strings which mix zero or more sql statements with zero or more pl/sql
Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -43,6 +43,7 @@
yield updateAddressBookDataVersion(sqlStore, UPGRADE_TO_VERSION)
+
@inlineCallbacks
def populateMemberTables(sqlStore):
"""
@@ -61,7 +62,7 @@
abObjectResources = yield home.addressbook().objectResources()
for abObject in abObjectResources:
component = yield abObject.component()
- lcResourceKind = component.resourceKind().lower() if component.resourceKind() else component.resourceKind();
+ lcResourceKind = component.resourceKind().lower() if component.resourceKind() else component.resourceKind()
if lcResourceKind == "group":
# update kind
abo = schema.ADDRESSBOOK_OBJECT
@@ -72,10 +73,11 @@
#update rest
yield abObject.setComponent(component)
-
# Do this to each calendar home not already at version 2
yield doToEachHomeNotAtVersion(sqlStore, schema.ADDRESSBOOK_HOME, UPGRADE_TO_VERSION, doIt)
+
+
@inlineCallbacks
def removeResourceType(sqlStore):
sqlTxn = sqlStore.newTransaction()
Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_1_to_2.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -85,6 +85,6 @@
"""
home = yield txn.calendarHomeWithResourceID(homeResourceID)
yield home.splitCalendars()
-
+
# Do this to each calendar home not already at version 2
yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, doIt)
Modified: CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py 2013-05-20 19:13:24 UTC (rev 11221)
+++ CalendarServer/trunk/txdav/common/datastore/upgrade/sql/upgrades/calendar_upgrade_from_2_to_3.py 2013-05-20 19:45:21 UTC (rev 11222)
@@ -39,4 +39,3 @@
yield updateCalendarDataVersion(
sqlStore, UPGRADE_TO_VERSION
)
-