[CalendarServer-changes] [7024] CalendarServer/trunk

source_changes at macosforge.org source_changes at macosforge.org
Wed Feb 16 07:44:05 PST 2011


Revision: 7024
          http://trac.macosforge.org/projects/calendarserver/changeset/7024
Author:   glyph at apple.com
Date:     2011-02-16 07:44:03 -0800 (Wed, 16 Feb 2011)
Log Message:
-----------
Convert queries to use DAL API, everywhere except sql_legacy.py.

Modified Paths:
--------------
    CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py
    CalendarServer/trunk/twext/enterprise/dal/model.py
    CalendarServer/trunk/twext/enterprise/dal/syntax.py
    CalendarServer/trunk/twext/enterprise/dal/test/test_parseschema.py
    CalendarServer/trunk/twext/enterprise/dal/test/test_sqlsyntax.py
    CalendarServer/trunk/txdav/base/propertystore/sql.py
    CalendarServer/trunk/txdav/caldav/datastore/file.py
    CalendarServer/trunk/txdav/caldav/datastore/sql.py
    CalendarServer/trunk/txdav/caldav/datastore/test/common.py
    CalendarServer/trunk/txdav/caldav/datastore/test/test_file.py
    CalendarServer/trunk/txdav/caldav/datastore/test/test_scheduling.py
    CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py
    CalendarServer/trunk/txdav/caldav/icalendarstore.py
    CalendarServer/trunk/txdav/carddav/datastore/sql.py
    CalendarServer/trunk/txdav/common/datastore/sql.py
    CalendarServer/trunk/txdav/common/datastore/sql_legacy.py
    CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql
    CalendarServer/trunk/txdav/common/datastore/sql_tables.py

Property Changed:
----------------
    CalendarServer/trunk/
    CalendarServer/trunk/txdav/caldav/datastore/index_file.py
    CalendarServer/trunk/txdav/caldav/datastore/test/test_index_file.py
    CalendarServer/trunk/txdav/carddav/datastore/index_file.py
    CalendarServer/trunk/txdav/carddav/datastore/test/test_index_file.py


Property changes on: CalendarServer/trunk
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593
   + /CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/wsanchez/transations:5515-5593

Modified: CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py
===================================================================
--- CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/calendarserver/tools/test/test_purge_old_events.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -30,6 +30,7 @@
 from calendarserver.tap.util import getRootResource
 from calendarserver.tools.purge import purgeOldEvents, purgeGUID, purgeOrphanedAttachments
 from twistedcaldav.config import config
+from twistedcaldav.memcacher import Memcacher
 from vobject.icalendar import utc
 
 import datetime
@@ -339,6 +340,9 @@
 
     @inlineCallbacks
     def setUp(self):
+        self.patch(config.Memcached.Pools.Default, "ClientEnabled", False)
+        self.patch(config.Memcached.Pools.Default, "ServerEnabled", False)
+        self.patch(Memcacher, "allowTestCache", True)
         yield super(PurgeOldEventsTests, self).setUp()
         self._sqlCalendarStore = yield buildStore(self, self.notifierFactory)
         yield self.populate()
@@ -353,7 +357,6 @@
                 os.path.dirname(__file__), "purge", "resources.xml"
             )
         )
-        self.patch(config.Memcached.Pools.Default, "ClientEnabled", False)
         self.rootResource = getRootResource(config, self._sqlCalendarStore)
         self.directory = self.rootResource.getDirectory()
 

Modified: CalendarServer/trunk/twext/enterprise/dal/model.py
===================================================================
--- CalendarServer/trunk/twext/enterprise/dal/model.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/twext/enterprise/dal/model.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -112,6 +112,7 @@
     """
 
 
+
 def _checkstr(x):
     """
     Verify that C{x} is a C{str}.  Raise a L{ValueError} if not.  This is to
@@ -121,6 +122,7 @@
         raise ValueError("%r is not a str." % (x,))
 
 
+
 class Column(object):
     """
     A column from a table.
@@ -184,7 +186,8 @@
 
         @rtype: C{bool}
         """
-        return self.canBeNull() or (self.default is not None)
+        return not (self.canBeNull() or
+                    (self.default not in (None, NO_DEFAULT)))
 
 
     def doesReferenceName(self, name):

Modified: CalendarServer/trunk/twext/enterprise/dal/syntax.py
===================================================================
--- CalendarServer/trunk/twext/enterprise/dal/syntax.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/twext/enterprise/dal/syntax.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -19,7 +19,7 @@
 Syntax wrappers and generators for SQL.
 """
 
-from twext.enterprise.dal.model import Schema, Table, Column
+from twext.enterprise.dal.model import Schema, Table, Column, Sequence
 
 
 class TableMismatch(Exception):
@@ -92,7 +92,7 @@
         if isinstance(other, ColumnSyntax):
             return ColumnComparison(self, comparator, other)
         else:
-            return ConstantComparison(self, comparator, other)
+            return CompoundComparison(self, comparator, Constant(other))
     return __
 
 
@@ -123,24 +123,59 @@
 
 
 class FunctionInvocation(ExpressionSyntax):
-    def __init__(self, name, arg):
+    def __init__(self, name, *args):
         self.name = name
-        self.arg = arg
+        self.args = args
 
 
     def allColumns(self):
-        return self.arg.allColumns()
+        """
+        All of the columns in all of the arguments' columns.
+        """
+        def ac():
+            for arg in self.args:
+                for column in arg.allColumns():
+                    yield column
+        return list(ac())
 
 
     def subSQL(self, placeholder, quote, allTables):
         result = SQLFragment(self.name)
-        result.text += "("
-        result.append(self.arg.subSQL(placeholder, quote, allTables))
-        result.text += ")"
+        result.append(_inParens(
+            _commaJoined(_convert(arg).subSQL(placeholder, quote, allTables)
+                         for arg in self.args)))
         return result
 
 
 
+class Constant(ExpressionSyntax):
+    def __init__(self, value):
+        self.value = value
+
+
+    def allColumns(self):
+        return []
+
+
+    def subSQL(self, placeholder, quote, allTables):
+        return SQLFragment(placeholder, [self.value])
+
+
+
+class NamedValue(ExpressionSyntax):
+    """
+    A constant within the database; something pre-defined, such as
+    CURRENT_TIMESTAMP.
+    """
+    def __init__(self, name):
+        self.name = name
+
+
+    def subSQL(self, placeholder, quote, allTables):
+        return SQLFragment(self.name)
+
+
+
 class Function(object):
     """
     An L{Function} is a representation of an SQL function.
@@ -150,12 +185,13 @@
         self.name = name
 
 
-    def __call__(self, arg):
+    def __call__(self, *args):
         """
         Produce an L{FunctionInvocation}
         """
-        return FunctionInvocation(self.name, arg)
+        return FunctionInvocation(self.name, *args)
 
+
 Max = Function("max")
 Len = Function("character_length")
 
@@ -172,11 +208,17 @@
         try:
             tableModel = self.model.tableNamed(attr)
         except KeyError:
-            raise AttributeError("schema has no table %r" % (attr,))
-        syntax = TableSyntax(tableModel)
-        # Needs to be preserved here so that aliasing will work.
-        setattr(self, attr, syntax)
-        return syntax
+            try:
+                seqModel = self.model.sequenceNamed(attr)
+            except KeyError:
+                raise AttributeError("schema has no table or sequence %r" % (attr,))
+            else:
+                return SequenceSyntax(seqModel)
+        else:
+            syntax = TableSyntax(tableModel)
+            # Needs to be preserved here so that aliasing will work.
+            setattr(self, attr, syntax)
+            return syntax
 
 
     def __iter__(self):
@@ -185,6 +227,21 @@
 
 
 
+class SequenceSyntax(ExpressionSyntax):
+    """
+    Syntactic convenience for L{Sequence}.
+    """
+
+    modelType = Sequence
+
+    def subSQL(self, placeholder, quote, allTables):
+        """
+        Convert to an SQL fragment.
+        """
+        return SQLFragment("nextval('%s')" % (self.model.name,))
+
+
+
 class TableSyntax(Syntax):
     """
     Syntactic convenience for L{Table}.
@@ -334,21 +391,6 @@
 
 
 
-class ConstantComparison(Comparison):
-
-    def allColumns(self):
-        return self.a.allColumns()
-
-
-    def subSQL(self, placeholder, quote, allTables):
-        sqls = SQLFragment()
-        sqls.append(self._subexpression(self.a, placeholder, quote, allTables))
-        sqls.append(SQLFragment(' ' + ' '.join([self.op, placeholder]),
-                                 [self.b]))
-        return sqls
-
-
-
 class CompoundComparison(Comparison):
     """
     A compound comparison; two or more constraints, joined by an operation
@@ -422,7 +464,7 @@
     """
 
     def __init__(self, columns=None, Where=None, From=None, OrderBy=None,
-                 GroupBy=None, Limit=None):
+                 GroupBy=None, Limit=None, ForUpdate=False):
         self.From = From
         self.Where = Where
         if not isinstance(OrderBy, (list, tuple, type(None))):
@@ -440,6 +482,7 @@
 
             columns = _SomeColumns(columns)
         self.columns = columns
+        self.ForUpdate = ForUpdate
 
 
     def toSQL(self, placeholder="?", quote=lambda x: x):
@@ -470,8 +513,10 @@
 
         if self.Limit is not None:
             stmt.text += quote(" limit ")
-            stmt.text += placeholder
-            stmt.parameters.append(self.Limit)
+            stmt.append(Constant(self.Limit).subSQL(placeholder, quote,
+                                                    allTables))
+        if self.ForUpdate:
+            stmt.text += quote(" for update")
         return stmt
 
 
@@ -525,13 +570,26 @@
 
 
 
-class Insert(object):
+class _CommaList(object):
+    def __init__(self, subfragments):
+        self.subfragments = subfragments
+
+
+    def subSQL(self, placeholder, quote, allTables):
+        return _commaJoined(f.subSQL(placeholder, quote, allTables)
+                            for f in self.subfragments)
+
+
+
+class Insert(_Statement):
     """
     'insert' statement.
     """
 
     def __init__(self, columnMap, Return=None):
         self.columnMap = columnMap
+        if isinstance(Return, (tuple, list)):
+            Return = _CommaList(Return)
         self.Return = Return
         columns = _modelsFromMap(columnMap)
         table = _fromSameTable(columns)
@@ -563,7 +621,8 @@
              sortedColumns])))
         stmt.append(SQLFragment(" values "))
         stmt.append(_inParens(_commaJoined(
-            [SQLFragment(placeholder, [v]) for (c, v) in sortedColumns])))
+            [_convert(v).subSQL(placeholder, quote, allTables)
+             for (c, v) in sortedColumns])))
         if self.Return is not None:
             stmt.text += ' returning '
             stmt.append(self.Return.subSQL(placeholder, quote, allTables))
@@ -571,8 +630,20 @@
 
 
 
-class Update(object):
+def _convert(x):
     """
+    Convert a value to an appropriate SQL AST node.  (Currently a simple
+    isinstance, could be promoted to use adaptation if we want to get fancy.)
+    """
+    if isinstance(x, ExpressionSyntax):
+        return x
+    else:
+        return Constant(x)
+
+
+
+class Update(_Statement):
+    """
     'update' statement
     """
 
@@ -581,6 +652,8 @@
         _fromSameTable(_modelsFromMap(columnMap))
         self.columnMap = columnMap
         self.Where = Where
+        if isinstance(Return, (tuple, list)):
+            Return = _CommaList(Return)
         self.Return = Return
 
 
@@ -602,7 +675,8 @@
         result.append(
             _commaJoined(
                 [c.subSQL(placeholder, quote, allTables).append(
-                    SQLFragment(" = " + placeholder, [v]))
+                    SQLFragment(" = ").subSQL(placeholder, quote, allTables)
+                ).append(_convert(v).subSQL(placeholder, quote, allTables))
                     for (c, v) in sortedColumns]
             )
         )
@@ -615,7 +689,7 @@
 
 
 
-class Delete(object):
+class Delete(_Statement):
     """
     'delete' statement.
     """
@@ -640,7 +714,7 @@
 
 
 
-class Lock(object):
+class Lock(_Statement):
     """
     An SQL 'lock' statement.
     """
@@ -722,4 +796,16 @@
         return 'Parameter(%r)' % (self.name,)
 
 
+# Common helpers:
 
+# current timestamp in UTC format.
+utcNowSQL = Function('timezone')('UTC', NamedValue('CURRENT_TIMESTAMP'))
+
+# You can't insert a column with no rows.  In SQL that just isn't valid syntax,
+# and in this DAL you need at least one key or we can't tell what table you're
+# talking about.  Luckily there's the 'default' keyword to the rescue, which, in
+# the context of an INSERT statement means 'use the default value explicitly'.
+# (Although this is a special keyword in a CREATE statement, in an INSERT it
+# behaves like an expression to the best of my knowledge.)
+default = NamedValue('default')
+

Modified: CalendarServer/trunk/twext/enterprise/dal/test/test_parseschema.py
===================================================================
--- CalendarServer/trunk/twext/enterprise/dal/test/test_parseschema.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/twext/enterprise/dal/test/test_parseschema.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -101,6 +101,23 @@
                           [s.tables[0].columns[0]])
 
 
+    def test_sequenceDefault(self):
+        """
+        Default sequence column.
+        """
+        s = Schema()
+        addSQLToSchema(s,
+                   """
+                   create sequence alpha;
+                   create table foo (
+                      bar integer default nextval('alpha') not null,
+                      qux integer not null
+                   );
+                   """)
+        self.assertEquals(s.tableNamed("foo").columnNamed("bar").needsValue(),
+                          False)
+
+
     def test_defaultConstantColumns(self):
         """
         Parsing a 'default' column with an appropriate type in it will return
@@ -125,6 +142,32 @@
         self.assertEquals(table.columnNamed("f").default, None)
 
 
+    def test_needsValue(self):
+        """
+        Columns with defaults, or which are nullable, don't need a value;
+        columns with no default and a 'not null' constraint do.
+        """
+        s = Schema()
+        addSQLToSchema(s,
+                       """
+                       create table a (
+                        b integer default 4321 not null,
+                        c boolean default false,
+                        d integer not null,
+                        e integer
+                       )
+                       """)
+        table = s.tableNamed("a")
+        # Has a default, NOT NULL.
+        self.assertEquals(table.columnNamed("b").needsValue(), False)
+        # Has a default _and_ nullable.
+        self.assertEquals(table.columnNamed("c").needsValue(), False)
+        # No default, not nullable.
+        self.assertEquals(table.columnNamed("d").needsValue(), True)
+        # Just nullable.
+        self.assertEquals(table.columnNamed("e").needsValue(), False)
+
+
     def test_notNull(self):
         """
         A column with a NOT NULL constraint in SQL will be parsed as a

Modified: CalendarServer/trunk/twext/enterprise/dal/test/test_sqlsyntax.py
===================================================================
--- CalendarServer/trunk/twext/enterprise/dal/test/test_sqlsyntax.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/twext/enterprise/dal/test/test_sqlsyntax.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -25,6 +25,7 @@
     TableMismatch, Parameter, Max, Len, NotEnoughValues
 )
 
+from twext.enterprise.dal.syntax import FunctionInvocation
 from twisted.trial.unittest import TestCase
 
 class GenerationTests(TestCase):
@@ -35,6 +36,7 @@
     def setUp(self):
         s = Schema(self.id())
         addSQLToSchema(schema=s, schemaData="""
+                       create sequence A_SEQ;
                        create table FOO (BAR integer, BAZ integer);
                        create table BOZ (QUX integer);
                        create table OTHER (BAR integer,
@@ -137,6 +139,17 @@
         )
 
 
+    def test_forUpdate(self):
+        """
+        L{Select}'s L{ForUpdate} parameter generates a 'for update' clause at
+        the end of the query.
+        """
+        self.assertEquals(
+            Select(From=self.schema.FOO, ForUpdate=True).toSQL(),
+            SQLFragment("select * from FOO for update")
+        )
+
+
     def test_groupBy(self):
         """
         L{Select}'s L{GroupBy} parameter generates a 'group by' clause for a
@@ -367,6 +380,21 @@
         )
 
 
+    def test_insertMultiReturn(self):
+        """
+        L{Insert}'s C{Return} argument can also be a C{tuple}, which will insert
+        an SQL 'returning' clause with multiple columns.
+        """
+        self.assertEquals(
+            Insert({self.schema.FOO.BAR: 23,
+                    self.schema.FOO.BAZ: 9},
+                   Return=(self.schema.FOO.BAR, self.schema.FOO.BAZ)).toSQL(),
+            SQLFragment(
+                "insert into FOO (BAR, BAZ) values (?, ?) returning BAR, BAZ",
+                [23, 9])
+        )
+
+
     def test_insertMismatch(self):
         """
         L{Insert} raises L{TableMismatch} if the columns specified aren't all
@@ -408,6 +436,37 @@
         )
 
 
+    def test_updateFunction(self):
+        """
+        L{Update} values may be L{FunctionInvocation}s, to update to computed
+        values in the database.
+        """
+        self.assertEquals(
+            Update(
+                {self.schema.FOO.BAR: 23,
+                 self.schema.FOO.BAZ: FunctionInvocation("hello")},
+                Where=self.schema.FOO.BAZ == 9
+            ).toSQL(),
+            SQLFragment("update FOO set BAR = ?, BAZ = hello() "
+                        "where BAZ = ?", [23, 9])
+        )
+
+
+    def test_insertFunction(self):
+        """
+        L{Insert} values may be L{FunctionInvocation}s, to insert computed
+        values in the database.
+        """
+        self.assertEquals(
+            Insert(
+                {self.schema.FOO.BAR: 23,
+                 self.schema.FOO.BAZ: FunctionInvocation("hello")},
+            ).toSQL(),
+            SQLFragment("insert into FOO (BAR, BAZ) "
+                        "values (?, hello())", [23])
+        )
+
+
     def test_deleteReturning(self):
         """
         L{Delete}'s C{Return} argument will delete an SQL 'returning' clause.
@@ -462,6 +521,18 @@
                    From=self.schema.FOO,
                    Limit=123).toSQL(),
             SQLFragment(
-                "select BAR from FOO limit ?", [123])
-        )
+                "select BAR from FOO limit ?", [123]))
 
+
+    def test_nextSequenceValue(self):
+        """
+        When a sequence is used as a value in an expression, it renders as the
+        call to 'nextval' that will produce its next value.
+        """
+        self.assertEquals(
+            Insert({self.schema.BOZ.QUX:
+                    self.schema.A_SEQ}).toSQL(),
+            SQLFragment("insert into BOZ (QUX) values (nextval('A_SEQ'))", []))
+
+
+

Modified: CalendarServer/trunk/txdav/base/propertystore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/base/propertystore/sql.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/base/propertystore/sql.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -23,15 +23,23 @@
     "PropertyStore",
 ]
 
+
 from twistedcaldav.memcacher import Memcacher
 
-from txdav.base.propertystore.base import AbstractPropertyStore, PropertyName,\
-    validKey
+from twext.enterprise.dal.syntax import (
+    Select, Parameter, Update, Insert, TableSyntax, Delete)
 
+from txdav.common.datastore.sql_tables import schema
+from txdav.base.propertystore.base import (AbstractPropertyStore,
+                                           PropertyName, validKey)
+
 from twext.web2.dav.davxml import WebDAVDocument
 
 from twisted.internet.defer import inlineCallbacks, returnValue
 
+
+prop = schema.RESOURCE_PROPERTY
+
 class PropertyStore(AbstractPropertyStore):
 
     _cacher = Memcacher("propertystore.sql", pickle=True, key_normalization=False)
@@ -42,6 +50,11 @@
         )
 
 
+    _allWithID = Select([prop.NAME, prop.VIEWER_UID, prop.VALUE],
+                        From=prop,
+                        Where=prop.RESOURCE_ID == Parameter("resourceID"))
+
+
     @classmethod
     @inlineCallbacks
     def load(cls, defaultuser, txn, resourceID, created=False):
@@ -51,54 +64,66 @@
         self._resourceID = resourceID
         self._cached = {}
         if not created:
-            
-            # Cache existing properties in this object 
-
+            # Cache existing properties in this object
             # Look for memcache entry first
             rows = yield self._cacher.get(str(self._resourceID))
-            
             if rows is None:
-                rows = yield self._txn.execSQL(
-                    """
-                    select NAME, VIEWER_UID, VALUE from RESOURCE_PROPERTY
-                    where RESOURCE_ID = %s
-                    """,
-                    [self._resourceID]
-                )
-                yield self._cacher.set(str(self._resourceID), rows if rows is not None else ())
+                rows = yield self._allWithID.on(txn,
+                                                resourceID=self._resourceID)
+                yield self._cacher.set(str(self._resourceID),
+                                       rows if rows is not None else ())
             for name, uid, value in rows:
                 self._cached[(name, uid)] = value
+        returnValue(self)
 
 
-        returnValue(self)
-
     @classmethod
     @inlineCallbacks
-    def loadAll(cls, defaultuser, txn, joinTable, joinColumn, parentIDColumn, parentID):
+    def forMultipleResources(cls, defaultUser, txn,
+                             childColumn, parentColumn, parentID):
         """
-        Return a list of property stores for all objects in a parent collection
+        Load all property stores for all objects in a collection.  This is used
+        to optimize Depth:1 operations on that collection, by loading all
+        relevant properties in a single query.
+
+        @param defaultUser: the UID of the user who owns / is requesting the
+            property stores; the ones whose per-user properties will be exposed.
+
+        @type defaultUser: C{str}
+
+        @param txn: the transaction within which to fetch the rows.
+
+        @type txn: L{IAsyncTransaction}
+
+        @param childColumn: The resource ID column for the child resources, i.e.
+            the resources of the type for which this method will load the
+            property stores.
+
+        @param parentColumn: The resource ID column for the parent resources.
+            e.g. if childColumn is addressbook object's resource ID, then this
+            should be addressbook's resource ID.
+
+        @return: a L{Deferred} that fires with a C{dict} mapping resource ID (a
+            value taken from C{childColumn}) to a L{PropertyStore} for that ID.
         """
-        rows = yield txn.execSQL(
-            """
-            select
-              %s,
-              RESOURCE_PROPERTY.RESOURCE_ID,
-              RESOURCE_PROPERTY.NAME,
-              RESOURCE_PROPERTY.VIEWER_UID,
-              RESOURCE_PROPERTY.VALUE
-            from RESOURCE_PROPERTY
-            right join %s on (RESOURCE_PROPERTY.RESOURCE_ID = %s) 
-            where %s = %%s
-            """ % (joinColumn, joinTable, joinColumn, parentIDColumn),
-            [parentID]
+        childTable = TableSyntax(childColumn.model.table)
+        query = Select([
+            childColumn,
+            # XXX is that column necessary?  as per the 'on' clause it has to be
+            # the same as prop.RESOURCE_ID anyway.
+            prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
+            From=prop.join(childTable, prop.RESOURCE_ID == childColumn,
+                           'right'),
+            Where=parentColumn == parentID
         )
-        
+        rows = yield query.on(txn)
+
         createdStores = {}
         for object_resource_id, resource_id, name, view_uid, value in rows:
             if resource_id:
                 if resource_id not in createdStores:
                     store = cls.__new__(cls)
-                    super(PropertyStore, store).__init__(defaultuser)
+                    super(PropertyStore, store).__init__(defaultUser)
                     store._txn = txn
                     store._resourceID = resource_id
                     store._cached = {}
@@ -106,12 +131,12 @@
                 createdStores[resource_id]._cached[(name, view_uid)] = value
             else:
                 store = cls.__new__(cls)
-                super(PropertyStore, store).__init__(defaultuser)
+                super(PropertyStore, store).__init__(defaultUser)
                 store._txn = txn
                 store._resourceID = object_resource_id
                 store._cached = {}
                 createdStores[object_resource_id] = store
-                
+
         returnValue(createdStores)
 
 
@@ -126,6 +151,19 @@
         return WebDAVDocument.fromString(value).root_element
 
 
+    _updateQuery = Update({prop.VALUE: Parameter("value")},
+                          Where=(
+                              prop.RESOURCE_ID == Parameter("resourceID")).And(
+                              prop.NAME == Parameter("name")).And(
+                              prop.VIEWER_UID == Parameter("uid")))
+
+
+    _insertQuery = Insert({prop.VALUE: Parameter("value"),
+                           prop.RESOURCE_ID: Parameter("resourceID"),
+                           prop.NAME: Parameter("name"),
+                           prop.VIEWER_UID: Parameter("uid")})
+
+
     def _setitem_uid(self, key, value, uid):
         validKey(key)
 
@@ -133,44 +171,37 @@
         value_str = value.toxml()
 
         if (key_str, uid) in self._cached:
-            self._txn.execSQL(
-                """
-                update RESOURCE_PROPERTY
-                set VALUE = %s
-                where RESOURCE_ID = %s and NAME = %s and VIEWER_UID = %s
-                """,
-                [value_str, self._resourceID, key_str, uid]
-            )
-        else:        
-            self._txn.execSQL(
-                """
-                insert into RESOURCE_PROPERTY
-                (RESOURCE_ID, NAME, VALUE, VIEWER_UID)
-                values (%s, %s, %s, %s)
-                """,
-                [self._resourceID, key_str, value_str, uid]
-            )
+            self._updateQuery.on(self._txn, resourceID=self._resourceID,
+                                 value=value_str, name=key_str, uid=uid)
+        else:
+            self._insertQuery.on(self._txn, resourceID=self._resourceID,
+                                 value=value_str, name=key_str, uid=uid)
         self._cached[(key_str, uid)] = value_str
         self._cacher.delete(str(self._resourceID))
 
+
+    _deleteQuery = Delete(
+        prop, Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
+            prop.NAME == Parameter("name")).And(
+                prop.VIEWER_UID == Parameter("uid"))
+    )
+
+
     def _delitem_uid(self, key, uid):
         validKey(key)
 
         key_str = key.toString()
         del self._cached[(key_str, uid)]
-        self._txn.execSQL(
-            """
-            delete from RESOURCE_PROPERTY
-            where RESOURCE_ID = %s and NAME = %s and VIEWER_UID = %s
-            """,
-            [self._resourceID, key_str, uid],
-            raiseOnZeroRowCount=lambda:KeyError(key)
-        )
+        self._deleteQuery.on(self._txn, lambda:KeyError(key),
+                             resourceID=self._resourceID,
+                             name=key_str, uid=uid
+                            )
         self._cacher.delete(str(self._resourceID))
-            
 
+
     def _keys_uid(self, uid):
-
         for cachedKey, cachedUID in self._cached.keys():
             if cachedUID == uid:
                 yield PropertyName.fromString(cachedKey)
+
+

Modified: CalendarServer/trunk/txdav/caldav/datastore/file.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/file.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/datastore/file.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -164,15 +164,17 @@
 
     @inlineCallbacks
     def getAllDropboxIDs(self):
-
         dropboxIDs = []
         for calendar in self.calendars():
             for calendarObject in calendar.calendarObjects():
-                dropboxID = (yield calendarObject.dropboxID())
-                dropboxIDs.append(dropboxID)
-        
+                component = calendarObject.component()
+                if (component.hasPropertyInAnyComponent("X-APPLE-DROPBOX") or
+                    component.hasPropertyInAnyComponent("ATTACH")):
+                    dropboxID = (yield calendarObject.dropboxID())
+                    dropboxIDs.append(dropboxID)
         returnValue(dropboxIDs)
 
+
     @property
     def _calendarStore(self):
         return self._dataStore


Property changes on: CalendarServer/trunk/txdav/caldav/datastore/index_file.py
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation/txdav/caldav/datastore/index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/caldav/datastore/index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/caldav/datastore/index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/caldav/datastore/index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/caldav/datastore/index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/caldav/datastore/index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/caldav/datastore/index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/caldav/datastore/index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/caldav/datastore/index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/caldav/datastore/index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/caldav/datastore/index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/caldav/datastore/index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/caldav/datastore/index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/caldav/datastore/index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/caldav/datastore/index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/caldav/datastore/index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/caldav/datastore/index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/caldav/datastore/index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/caldav/datastore/index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/caldav/datastore/index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/caldav/datastore/index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/caldav/datastore/index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/caldav/datastore/index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/caldav/datastore/index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/caldav/datastore/index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/caldav/datastore/index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/caldav/datastore/index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/caldav/datastore/index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/caldav/datastore/index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/index.py:6322-6394
   + /CalendarServer/branches/config-separation/txdav/caldav/datastore/index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/caldav/datastore/index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/caldav/datastore/index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/caldav/datastore/index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/caldav/datastore/index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/caldav/datastore/index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/caldav/datastore/index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/caldav/datastore/index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/caldav/datastore/index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/caldav/datastore/index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/caldav/datastore/index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/caldav/datastore/index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/caldav/datastore/index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/caldav/datastore/index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dalify/txdav/caldav/datastore/index_file.py:6932-7023
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/caldav/datastore/index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/caldav/datastore/index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/caldav/datastore/index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/caldav/datastore/index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/caldav/datastore/index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/caldav/datastore/index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/caldav/datastore/index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/caldav/datastore/index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/caldav/datastore/index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/caldav/datastore/index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/caldav/datastore/index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/caldav/datastore/index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/caldav/datastore/index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/caldav/datastore/index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/caldav/datastore/index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/index.py:6322-6394

Modified: CalendarServer/trunk/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/sql.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/datastore/sql.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -59,6 +59,13 @@
     CALENDAR_HOME_TABLE, CALENDAR_HOME_METADATA_TABLE,\
     CALENDAR_AND_CALENDAR_BIND, CALENDAR_OBJECT_REVISIONS_AND_BIND_TABLE,\
     CALENDAR_OBJECT_AND_BIND_TABLE, schema
+from twext.enterprise.dal.syntax import Select
+from twext.enterprise.dal.syntax import Insert
+from twext.enterprise.dal.syntax import Update
+from twext.enterprise.dal.syntax import Delete
+from twext.enterprise.dal.syntax import Parameter
+from twext.enterprise.dal.syntax import utcNowSQL
+from twext.enterprise.dal.syntax import Len
 from txdav.common.icommondatastore import IndexedSearchException
 
 from vobject.icalendar import utc
@@ -71,6 +78,14 @@
 
     implements(ICalendarHome)
 
+    # structured tables.  (new, preferred)
+    _homeSchema = schema.CALENDAR_HOME
+    _bindSchema = schema.CALENDAR_BIND
+    _homeMetaDataSchema = schema.CALENDAR_HOME_METADATA
+    _revisionsSchema = schema.CALENDAR_OBJECT_REVISIONS
+    _objectSchema = schema.CALENDAR_OBJECT
+
+    # string mappings (old, removing)
     _homeTable = CALENDAR_HOME_TABLE
     _homeMetaDataTable = CALENDAR_HOME_METADATA_TABLE
     _childTable = CALENDAR_TABLE
@@ -119,23 +134,22 @@
             
         returnValue(results)
 
+
     @inlineCallbacks
     def calendarObjectWithDropboxID(self, dropboxID):
         """
         Implement lookup via queries.
         """
-        rows = (yield self._txn.execSQL("""
-            select %(OBJECT:name)s.%(OBJECT:column_PARENT_RESOURCE_ID)s, %(OBJECT:column_RESOURCE_ID)s
-            from %(OBJECT:name)s
-            left outer join %(BIND:name)s on (
-              %(OBJECT:name)s.%(OBJECT:column_PARENT_RESOURCE_ID)s = %(BIND:name)s.%(BIND:column_RESOURCE_ID)s
-            )
-            where
-             %(OBJECT:column_DROPBOX_ID)s = %%s and
-             %(BIND:name)s.%(BIND:column_HOME_RESOURCE_ID)s = %%s
-            """ % CALENDAR_OBJECT_AND_BIND_TABLE,
-            [dropboxID, self._resourceID,]
-        ))
+        co = schema.CALENDAR_OBJECT
+        cb = schema.CALENDAR_BIND
+        rows = (yield Select(
+            [co.PARENT_RESOURCE_ID,
+             co.RESOURCE_ID],
+            From=co.join(cb, co.PARENT_RESOURCE_ID == cb.RESOURCE_ID,
+                         'left outer'),
+            Where=(co.DROPBOX_ID == dropboxID).And(
+                cb.HOME_RESOURCE_ID == self._resourceID)
+        ).on(self._txn))
 
         if rows:
             calendarID, objectID = rows[0]
@@ -143,28 +157,23 @@
             if calendar:
                 calendarObject = (yield calendar.objectResourceWithID(objectID))
                 returnValue(calendarObject)
-        
         returnValue(None)
 
+
     @inlineCallbacks
     def getAllDropboxIDs(self):
-
-        rows = (yield self._txn.execSQL("""
-            select %(OBJECT:column_DROPBOX_ID)s
-            from %(OBJECT:name)s
-            left outer join %(BIND:name)s on (
-              %(OBJECT:name)s.%(OBJECT:column_PARENT_RESOURCE_ID)s = %(BIND:name)s.%(BIND:column_RESOURCE_ID)s
-            )
-            where
-             %(OBJECT:column_DROPBOX_ID)s is not null and
-             %(BIND:name)s.%(BIND:column_HOME_RESOURCE_ID)s = %%s
-            order by %(OBJECT:column_DROPBOX_ID)s
-            """ % CALENDAR_OBJECT_AND_BIND_TABLE,
-            [self._resourceID]
-        ))
-        
+        co = schema.CALENDAR_OBJECT
+        cb = schema.CALENDAR_BIND
+        rows = (yield Select(
+            [co.DROPBOX_ID],
+            From=co.join(cb, co.PARENT_RESOURCE_ID == cb.RESOURCE_ID),
+            Where=(co.DROPBOX_ID != None).And(
+                cb.HOME_RESOURCE_ID == self._resourceID),
+            OrderBy=co.DROPBOX_ID
+        ).on(self._txn))
         returnValue([row[0] for row in rows])
 
+
     @inlineCallbacks
     def createdHome(self):
         defaultCal = yield self.createCalendarWithName("calendar")
@@ -173,13 +182,21 @@
             Opaque())
         yield self.createCalendarWithName("inbox")
 
+
+
 class Calendar(CommonHomeChild):
     """
     File-based implementation of L{ICalendar}.
     """
     implements(ICalendar)
 
+    # structured tables.  (new, preferred)
     _bindSchema = schema.CALENDAR_BIND
+    _homeChildSchema = schema.CALENDAR
+    _revisionsSchema = schema.CALENDAR_OBJECT_REVISIONS
+    _objectSchema = schema.CALENDAR_OBJECT
+
+    # string mappings (old, removing)
     _bindTable = CALENDAR_BIND_TABLE
     _homeChildTable = CALENDAR_TABLE
     _homeChildBindTable = CALENDAR_AND_CALENDAR_BIND
@@ -315,6 +332,7 @@
     implements(ICalendarObject)
 
     _objectTable = CALENDAR_OBJECT_TABLE
+    _objectSchema = schema.CALENDAR_OBJECT
 
     def __init__(self, calendar, name, uid, resourceID=None, metadata=None):
 
@@ -331,10 +349,6 @@
 
     @classmethod
     def _selectAllColumns(cls):
-        """
-        Full set of columns in the object table that need to be loaded to
-        initialize the object resource state.
-        """
         return """
             select 
               %(column_RESOURCE_ID)s,
@@ -353,10 +367,29 @@
               %(column_MODIFIED)s
         """ % cls._objectTable
 
+
+    _allColumns = [
+        _objectSchema.RESOURCE_ID,
+        _objectSchema.RESOURCE_NAME,
+        _objectSchema.UID,
+        _objectSchema.MD5,
+        Len(_objectSchema.TEXT),
+        _objectSchema.ATTACHMENTS_MODE,
+        _objectSchema.DROPBOX_ID,
+        _objectSchema.ACCESS,
+        _objectSchema.SCHEDULE_OBJECT,
+        _objectSchema.SCHEDULE_TAG,
+        _objectSchema.SCHEDULE_ETAGS,
+        _objectSchema.PRIVATE_COMMENTS,
+        _objectSchema.CREATED,
+        _objectSchema.MODIFIED
+    ]
+
+
     def _initFromRow(self, row):
         """
-        Given a select result using the columns from L{_selectAllColumns}, initialize
-        the object resource state.
+        Given a select result using the columns from L{_allColumns}, initialize
+        the calendar object resource state.
         """
         (self._resourceID,
          self._name,
@@ -373,6 +406,7 @@
          self._created,
          self._modified,) = tuple(row)
 
+
     @property
     def _calendar(self):
         return self._parentCollection
@@ -397,7 +431,8 @@
 
 
     @inlineCallbacks
-    def updateDatabase(self, component, expand_until=None, reCreate=False, inserting=False):
+    def updateDatabase(self, component,
+                       expand_until=None, reCreate=False, inserting=False):
         """
         Update the database tables for the new data being written.
 
@@ -407,8 +442,10 @@
 
         # Decide how far to expand based on the component
         master = component.masterComponent()
-        if master is None or not component.isRecurring() and not component.isRecurringUnbounded():
-            # When there is no master we have a set of overridden components - index them all.
+        if ( master is None or not component.isRecurring()
+             and not component.isRecurringUnbounded() ):
+            # When there is no master we have a set of overridden components -
+            #   index them all.
             # When there is one instance - index it.
             # When bounded - index all.
             expand = datetime.datetime(2100, 1, 1, 0, 0, 0, tzinfo=utc)
@@ -416,19 +453,22 @@
             if expand_until:
                 expand = expand_until
             else:
-                expand = datetime.date.today() + default_future_expansion_duration
-
-            if expand > (datetime.date.today() + maximum_future_expansion_duration):
+                expand = (datetime.date.today() +
+                          default_future_expansion_duration)
+            if expand > (datetime.date.today() +
+                         maximum_future_expansion_duration):
                 raise IndexedSearchException
-
         try:
-            instances = component.expandTimeRanges(expand, ignoreInvalidInstances=reCreate)
+            instances = component.expandTimeRanges(
+                expand, ignoreInvalidInstances=reCreate)
         except InvalidOverriddenInstanceError, e:
-            self.log_error("Invalid instance %s when indexing %s in %s" % (e.rid, self._name, self._calendar,))
-            
+            self.log_error("Invalid instance %s when indexing %s in %s" %
+                           (e.rid, self._name, self._calendar,))
+
             if self._txn._migrating:
                 # TODO: fix the data here by re-writing component then re-index
-                instances = component.expandTimeRanges(expand, ignoreInvalidInstances=True)
+                instances = component.expandTimeRanges(
+                    expand, ignoreInvalidInstances=True)
             else:
                 raise
 
@@ -452,86 +492,51 @@
                 self._attachment = _ATTACHMENTS_MODE_WRITE
                 self._dropboxID = (yield self.dropboxID())
             elif component.hasPropertyInAnyComponent("ATTACH"):
-                # FIXME: really we ought to check to see if the ATTACH properties have URI values
-                # and if those are pointing to our server dropbox collections and only then set
-                # the read mode
+                # FIXME: really we ought to check to see if the ATTACH
+                # properties have URI values and if those are pointing to our
+                # server dropbox collections and only then set the read mode
                 self._attachment = _ATTACHMENTS_MODE_READ
                 self._dropboxID = (yield self.dropboxID())
 
+        tr = schema.TIME_RANGE
+        co = schema.CALENDAR_OBJECT
+        tpy = schema.TRANSPARENCY
+
+        values = {
+            co.CALENDAR_RESOURCE_ID            : self._calendar._resourceID,
+            co.RESOURCE_NAME                   : self._name,
+            co.ICALENDAR_TEXT                  : componentText,
+            co.ICALENDAR_UID                   : self._uid,
+            co.ICALENDAR_TYPE                  : component.resourceType(),
+            co.ATTACHMENTS_MODE                : self._attachment,
+            co.DROPBOX_ID                      : self._dropboxID,
+            co.ORGANIZER                       : organizer,
+            co.RECURRANCE_MAX                  :
+                normalizeForIndex(instances.limit) if instances.limit else None,
+            co.ACCESS                          : self._access,
+            co.SCHEDULE_OBJECT                 : self._schedule_object,
+            co.SCHEDULE_TAG                    : self._schedule_tag,
+            co.SCHEDULE_ETAGS                  : self._schedule_etags,
+            co.PRIVATE_COMMENTS                : self._private_comments,
+            co.MD5                             : self._md5
+        }
+
         if inserting:
-            self._resourceID, self._created, self._modified  = (
-                yield self._txn.execSQL(
-                """
-                insert into CALENDAR_OBJECT
-                (CALENDAR_RESOURCE_ID, RESOURCE_NAME, ICALENDAR_TEXT, ICALENDAR_UID, ICALENDAR_TYPE,
-                 ATTACHMENTS_MODE, DROPBOX_ID, ORGANIZER, RECURRANCE_MAX, ACCESS, SCHEDULE_OBJECT, SCHEDULE_TAG,
-                 SCHEDULE_ETAGS, PRIVATE_COMMENTS, MD5)
-                 values
-                (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
-                returning
-                 RESOURCE_ID,
-                 CREATED,
-                 MODIFIED
-                """,
-                [
-                    self._calendar._resourceID,
-                    self._name,
-                    componentText,
-                    self._uid,
-                    component.resourceType(),
-                    self._attachment,
-                    self._dropboxID,
-                    organizer,
-                    normalizeForIndex(instances.limit) if instances.limit else None,
-                    self._access,
-                    self._schedule_object,
-                    self._schedule_tag,
-                    self._schedule_etags,
-                    self._private_comments,
-                    self._md5,
-                ]
-            ))[0]
+            self._resourceID, self._created, self._modified = (
+                yield Insert(values, Return=(
+                    co.RESOURCE_ID, co.CREATED, co.MODIFIED)).on(self._txn))[0]
         else:
-            yield self._txn.execSQL(
-                """
-                update CALENDAR_OBJECT set
-                (ICALENDAR_TEXT, ICALENDAR_UID, ICALENDAR_TYPE, ATTACHMENTS_MODE,
-                 DROPBOX_ID, ORGANIZER, RECURRANCE_MAX, ACCESS, SCHEDULE_OBJECT, SCHEDULE_TAG,
-                 SCHEDULE_ETAGS, PRIVATE_COMMENTS, MD5, MODIFIED)
-                 =
-                (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, timezone('UTC', CURRENT_TIMESTAMP))
-                where RESOURCE_ID = %s
-                returning MODIFIED
-                """,
-                [
-                    componentText,
-                    self._uid,
-                    component.resourceType(),
-                    self._attachment,
-                    self._dropboxID,
-                    organizer,
-                    normalizeForIndex(instances.limit) if instances.limit else None,
-                    self._access,
-                    self._schedule_object,
-                    self._schedule_tag,
-                    self._schedule_etags,
-                    self._private_comments,
-                    self._md5,
-                    self._resourceID,
-                ]
-            )
-
+            values[co.MODIFIED] = utcNowSQL
+            self._modified = (
+                yield Update(values, Return=co.MODIFIED,
+                             Where=co.RESOURCE_ID == self._resourceID
+                            ).on(self._txn))[0][0]
             # Need to wipe the existing time-range for this and rebuild
-            yield self._txn.execSQL(
-                """
-                delete from TIME_RANGE where CALENDAR_OBJECT_RESOURCE_ID = %s
-                """,
-                [
-                    self._resourceID,
-                ],
-            )
+            yield Delete(
+                From=tr,
+                Where=tr.CALENDAR_OBJECT_RESOURCE_ID == self._resourceID
+            ).on(self._txn)
 
-
         # CALENDAR_OBJECT table update
         for key in instances:
             instance = instances[key]
@@ -539,40 +544,25 @@
             end = instance.end.replace(tzinfo=utc)
             float = instance.start.tzinfo is None
             transp = instance.component.propertyValue("TRANSP") == "TRANSPARENT"
-            instanceid = (yield self._txn.execSQL(
-                """
-                insert into TIME_RANGE
-                (CALENDAR_RESOURCE_ID, CALENDAR_OBJECT_RESOURCE_ID, FLOATING, START_DATE, END_DATE, FBTYPE, TRANSPARENT)
-                 values
-                (%s, %s, %s, %s, %s, %s, %s)
-                 returning
-                INSTANCE_ID
-                """,
-                [
-                    self._calendar._resourceID,
-                    self._resourceID,
-                    float,
-                    start,
-                    end,
-                    icalfbtype_to_indexfbtype.get(instance.component.getFBType(), icalfbtype_to_indexfbtype["FREE"]),
-                    transp,
-                ],
-            ))[0][0]
+            instanceid = (yield Insert({
+                tr.CALENDAR_RESOURCE_ID        : self._calendar._resourceID,
+                tr.CALENDAR_OBJECT_RESOURCE_ID : self._resourceID,
+                tr.FLOATING                    : float,
+                tr.START_DATE                  : start,
+                tr.END_DATE                    : end,
+                tr.FBTYPE                      :
+                    icalfbtype_to_indexfbtype.get(
+                        instance.component.getFBType(),
+                        icalfbtype_to_indexfbtype["FREE"]),
+                tr.TRANSPARENT                 : transp,
+            }, Return=tr.INSTANCE_ID).on(self._txn))[0][0]
             peruserdata = component.perUserTransparency(instance.rid)
             for useruid, transp in peruserdata:
-                yield self._txn.execSQL(
-                    """
-                    insert into TRANSPARENCY
-                    (TIME_RANGE_INSTANCE_ID, USER_ID, TRANSPARENT)
-                     values
-                    (%s, %s, %s)
-                    """,
-                    [
-                        instanceid,
-                        useruid,
-                        transp,
-                    ],
-                )
+                (yield Insert({
+                    tpy.TIME_RANGE_INSTANCE_ID : instanceid,
+                    tpy.USER_ID                : useruid,
+                    tpy.TRANSPARENT            : transp,
+                }).on(self._txn))
 
         # Special - for unbounded recurrence we insert a value for "infinity"
         # that will allow an open-ended time-range to always match it.
@@ -580,40 +570,24 @@
             start = datetime.datetime(2100, 1, 1, 0, 0, 0, tzinfo=utc)
             end = datetime.datetime(2100, 1, 1, 1, 0, 0, tzinfo=utc)
             float = False
-            instanceid = (yield self._txn.execSQL(
-                """
-                insert into TIME_RANGE
-                (CALENDAR_RESOURCE_ID, CALENDAR_OBJECT_RESOURCE_ID, FLOATING, START_DATE, END_DATE, FBTYPE, TRANSPARENT)
-                 values
-                (%s, %s, %s, %s, %s, %s, %s)
-                 returning
-                INSTANCE_ID
-                """,
-                [
-                    self._calendar._resourceID,
-                    self._resourceID,
-                    float,
-                    start,
-                    end,
+            transp = True
+            instanceid = (yield Insert({
+                tr.CALENDAR_RESOURCE_ID        : self._calendar._resourceID,
+                tr.CALENDAR_OBJECT_RESOURCE_ID : self._resourceID,
+                tr.FLOATING                    : float,
+                tr.START_DATE                  : start,
+                tr.END_DATE                    : end,
+                tr.FBTYPE                      :
                     icalfbtype_to_indexfbtype["UNKNOWN"],
-                    True,
-                ],
-            ))[0][0]
+                tr.TRANSPARENT                 : transp,
+            }, Return=tr.INSTANCE_ID).on(self._txn))[0][0]
             peruserdata = component.perUserTransparency(None)
             for useruid, transp in peruserdata:
-                yield self._txn.execSQL(
-                    """
-                    insert into TRANSPARENCY
-                    (TIME_RANGE_INSTANCE_ID, USER_ID, TRANSPARENT)
-                     values
-                    (%s, %s, %s)
-                    """,
-                    [
-                        instanceid,
-                        useruid,
-                        transp,
-                    ],
-                )
+                (yield Insert({
+                    tpy.TIME_RANGE_INSTANCE_ID : instanceid,
+                    tpy.USER_ID                : useruid,
+                    tpy.TRANSPARENT            : transp,
+                }).on(self._txn))
 
 
     @inlineCallbacks
@@ -692,12 +666,17 @@
     dropboxID = dropboxIDFromCalendarObject
 
 
+    _attachmentsQuery = Select(
+        [schema.ATTACHMENT.PATH],
+        From=schema.ATTACHMENT,
+        Where=schema.ATTACHMENT.DROPBOX_ID == Parameter('dropboxID')
+    )
+
+
     @inlineCallbacks
     def attachments(self):
-        rows = yield self._txn.execSQL(
-            """
-            select PATH from ATTACHMENT where DROPBOX_ID = %s
-            """, [self._dropboxID])
+        rows = yield self._attachmentsQuery.on(self._txn,
+                                               dropboxID=self._dropboxID)
         result = []
         for row in rows:
             result.append((yield self.attachmentWithName(row[0])))
@@ -753,33 +732,32 @@
         self.attachment._contentType = self.contentType
         self.attachment._md5 = self.hash.hexdigest()
         self.attachment._size = len(self.buf)
+        att = schema.ATTACHMENT
         self.attachment._created, self.attachment._modified = map(
             sqltime,
-            (yield self._txn.execSQL(
-                """
-                update ATTACHMENT set CONTENT_TYPE = %s, SIZE = %s, MD5 = %s,
-                 MODIFIED = timezone('UTC', CURRENT_TIMESTAMP)
-                where PATH = %s
-                returning CREATED, MODIFIED
-                """,
-                [
-                    generateContentType(self.contentType),
-                    self.attachment._size,
-                    self.attachment._md5,
-                    self.attachment.name()
-                ]
-            ))[0]
+            (yield Update(
+                {
+                    att.CONTENT_TYPE : generateContentType(self.contentType),
+                    att.SIZE         : self.attachment._size,
+                    att.MD5          : self.attachment._md5,
+                    att.MODIFIED     : utcNowSQL
+                },
+                Where=att.PATH == self.attachment.name(),
+                Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
         )
 
-        home = (yield self._txn.calendarHomeWithResourceID(self.attachment._ownerHomeID))
+        home = (
+            yield self._txn.calendarHomeWithResourceID(
+                self.attachment._ownerHomeID))
         if home:
             # Adjust quota
             yield home.adjustQuotaUsedBytes(self.attachment.size() - old_size)
-            
+
             # Send change notification to home
             yield home.notifyChanged()
 
 
+
 def sqltime(value):
     return datetimeMktime(parseSQLTimestamp(value))
 
@@ -798,10 +776,11 @@
     @classmethod
     def _attachmentPathRoot(cls, txn, dropboxID):
         attachmentRoot = txn._store.attachmentsPath
-        
+
         # Use directory hashing scheme based on MD5 of dropboxID
         hasheduid = hashlib.md5(dropboxID).hexdigest()
-        return attachmentRoot.child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
+        return attachmentRoot.child(hasheduid[0:2]).child(
+            hasheduid[2:4]).child(hasheduid)
 
 
     @classmethod
@@ -816,24 +795,18 @@
 
         # Now create the DB entry
         attachment = cls(txn, dropboxID, name, ownerHomeID)
-        yield txn.execSQL(
-            """
-            insert into ATTACHMENT
-              (CALENDAR_HOME_RESOURCE_ID, DROPBOX_ID, CONTENT_TYPE, SIZE, MD5, PATH)
-             values
-              (%s, %s, %s, %s, %s, %s)
-            """,
-            [
-                ownerHomeID,
-                dropboxID,
-                "",
-                0,
-                "",
-                name,
-            ]
-        )
+        att = schema.ATTACHMENT
+        yield Insert({
+            att.CALENDAR_HOME_RESOURCE_ID : ownerHomeID,
+            att.DROPBOX_ID                : dropboxID,
+            att.CONTENT_TYPE              : "",
+            att.SIZE                      : 0,
+            att.MD5                       : "",
+            att.PATH                      : name
+        }).on(txn)
         returnValue(attachment)
 
+
     @classmethod
     @inlineCallbacks
     def attachmentWithName(cls, txn, dropboxID, name):
@@ -841,6 +814,7 @@
         attachment = (yield attachment.initFromStore())
         returnValue(attachment)
 
+
     @inlineCallbacks
     def initFromStore(self):
         """
@@ -848,14 +822,12 @@
 
         @return: C{True} if this attachment exists, C{False} otherwise.
         """
-        rows = yield self._txn.execSQL(
-            """
-            select CALENDAR_HOME_RESOURCE_ID, CONTENT_TYPE, SIZE, MD5, CREATED, MODIFIED
-             from ATTACHMENT
-             where DROPBOX_ID = %s and PATH = %s
-            """,
-            [self._dropboxID, self._name]
-        )
+        att = schema.ATTACHMENT
+        rows = (yield Select([att.CALENDAR_HOME_RESOURCE_ID, att.CONTENT_TYPE,
+                              att.SIZE, att.MD5, att.CREATED, att.MODIFIED],
+                             From=att,
+                             Where=(att.DROPBOX_ID == self._dropboxID).And(
+                                 att.PATH == self._name)).on(self._txn))
         if not rows:
             returnValue(None)
         self._ownerHomeID = rows[0][0]
@@ -870,14 +842,16 @@
     def name(self):
         return self._name
 
+
     @property
     def _path(self):
         attachmentRoot = self._txn._store.attachmentsPath
-        
         # Use directory hashing scheme based on MD5 of dropboxID
         hasheduid = hashlib.md5(self._dropboxID).hexdigest()
-        return attachmentRoot.child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid).child(self.name())
+        return attachmentRoot.child(hasheduid[0:2]).child(
+            hasheduid[2:4]).child(hasheduid).child(self.name())
 
+
     def properties(self):
         pass # stub
 
@@ -891,26 +865,28 @@
         protocol.connectionLost(Failure(ConnectionLost()))
 
 
+    _removeStatement = Delete(
+        From=schema.ATTACHMENT,
+        Where=(schema.ATTACHMENT.DROPBOX_ID == Parameter("dropboxID")).And(
+            schema.ATTACHMENT.PATH == Parameter("path")
+        ))
+
+
     @inlineCallbacks
     def remove(self):
         old_size = self._size
         self._txn.postCommit(self._path.remove)
-        yield self._txn.execSQL(
-            """
-            delete from ATTACHMENT
-             where DROPBOX_ID = %s and PATH = %s
-            """,
-            [self._dropboxID, self._name]
-        )
-
+        yield self._removeStatement.on(self._txn, dropboxID=self._dropboxID,
+                                       path=self._name)
         # Adjust quota
         home = (yield self._txn.calendarHomeWithResourceID(self._ownerHomeID))
         if home:
             yield home.adjustQuotaUsedBytes(-old_size)
-            
+
             # Send change notification to home
             yield home.notifyChanged()
 
+
     # IDataStoreResource
     def contentType(self):
         return self._contentType

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/common.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/common.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/common.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -279,6 +279,30 @@
 
 
     @inlineCallbacks
+    def test_notificationSyncToken(self):
+        """
+        L{INotificationCollection.resourceNamesSinceToken} will return the
+        names of notification objects changed or deleted since the given
+        revision.
+        """
+        txn = self.transactionUnderTest()
+        coll = yield txn.notificationsWithUID("home1")
+        invite1 = InviteNotification()
+        yield coll.writeNotificationObject("1", invite1, invite1.toxml())
+        st = yield coll.syncToken()
+        yield coll.writeNotificationObject("2", invite1, invite1.toxml())
+        rev = self.token2revision(st)
+        yield coll.removeNotificationObjectWithUID("1")
+        st2 = yield coll.syncToken()
+        rev2 = self.token2revision(st2)
+        changed, deleted = yield coll.resourceNamesSinceToken(rev)
+        self.assertEquals(set(changed), set(["2.xml"]))
+        self.assertEquals(set(deleted), set(["1.xml"]))
+        changed, deleted = yield coll.resourceNamesSinceToken(rev2)
+        self.assertEquals(set(changed), set([]))
+        self.assertEquals(set(deleted), set([]))
+
+
+    @inlineCallbacks
     def test_replaceNotification(self):
         """
         L{INotificationCollection.writeNotificationObject} will silently
@@ -811,6 +835,24 @@
 
 
     @inlineCallbacks
+    def test_usedQuotaAdjustment(self):
+        """
+        Adjust used quota on the calendar home and then verify that it's used.
+        """
+        home = yield self.homeUnderTest()
+        initialQuota = yield home.quotaUsedBytes()
+        yield home.adjustQuotaUsedBytes(30)
+        yield self.commit()
+        home2 = yield self.homeUnderTest()
+        afterQuota = yield home2.quotaUsedBytes()
+        self.assertEqual(afterQuota - initialQuota, 30)
+        yield home2.adjustQuotaUsedBytes(-100000)
+        yield self.commit()
+        home3 = yield self.homeUnderTest()
+        self.assertEqual((yield home3.quotaUsedBytes()), 0)
+
+
+    @inlineCallbacks
     def test_component(self):
         """
         L{ICalendarObject.component} returns a L{VComponent} describing the
@@ -1228,7 +1270,97 @@
         self.assertEquals((yield obj.dropboxID()), "some-dropbox-id")
 
 
+    def token2revision(self, token):
+        """
+        FIXME: the API names for L{syncToken}() and L{resourceNamesSinceToken}()
+        are slightly inaccurate; one doesn't produce input for the other.
+        Actually it should be resource names since I{revision} and you need to
+        understand the structure of the tokens to extract the revision.  Right
+        now that logic lives in the protocol layer, so this testing method
+        replicates it.
+        """
+        uuid, rev = token.split("#", 1)
+        rev = int(rev)
+        return rev
+
+
     @inlineCallbacks
+    def test_simpleHomeSyncToken(self):
+        """
+        L{ICalendarHome.resourceNamesSinceToken} will return the names of
+        calendar objects created since L{ICalendarHome.syncToken} last returned
+        a particular value.
+        """
+        home = yield self.homeUnderTest()
+        cal = yield self.calendarUnderTest()
+        st = yield home.syncToken()
+        yield cal.createCalendarObjectWithName("new.ics", VComponent.fromString(
+                self.eventWithDropbox
+            )
+        )
+
+        yield cal.removeCalendarObjectWithName("2.ics")
+        yield home.createCalendarWithName("other-calendar")
+        st2 = yield home.syncToken()
+        self.failIfEquals(st, st2)
+
+        home = yield self.homeUnderTest()
+
+        changed, deleted = yield home.resourceNamesSinceToken(
+            self.token2revision(st), "depth_is_ignored")
+
+        self.assertEquals(set(changed), set(["calendar_1/new.ics",
+                                             "calendar_1/2.ics",
+                                             "other-calendar/"]))
+        self.assertEquals(set(deleted), set(["calendar_1/2.ics"]))
+
+        changed, deleted = yield home.resourceNamesSinceToken(
+            self.token2revision(st2), "depth_is_ignored")
+        self.assertEquals(changed, [])
+        self.assertEquals(deleted, [])
+
+
+    @inlineCallbacks
+    def test_collectionSyncToken(self):
+        """
+        L{ICalendar.resourceNamesSinceToken} will return the names of calendar
+        objects changed or deleted since the given revision.
+        """
+        cal = yield self.calendarUnderTest()
+        st = yield cal.syncToken()
+        rev = self.token2revision(st)
+        yield cal.createCalendarObjectWithName("new.ics", VComponent.fromString(
+                self.eventWithDropbox
+            )
+        )
+        yield cal.removeCalendarObjectWithName("2.ics")
+        st2 = yield cal.syncToken()
+        rev2 = self.token2revision(st2)
+        changed, deleted = yield cal.resourceNamesSinceToken(rev)
+        self.assertEquals(set(changed), set(["new.ics"]))
+        self.assertEquals(set(deleted), set(["2.ics"]))
+        changed, deleted = yield cal.resourceNamesSinceToken(rev2)
+        self.assertEquals(set(changed), set([]))
+        self.assertEquals(set(deleted), set([]))
+
+
+    @inlineCallbacks
+    def test_dropboxIDs(self):
+        """
+        L{ICalendarObject.getAllDropboxIDs} returns a L{Deferred} that fires
+        with a C{list} of all Dropbox IDs.
+        """
+        home = yield self.homeUnderTest()
+        # The only item in the home which has an ATTACH or X-APPLE-DROPBOX
+        # property.
+        allDropboxIDs = set([
+            u'FE5CDC6F-7776-4607-83A9-B90FF7ACC8D0.dropbox',
+        ])
+        self.assertEquals(set((yield home.getAllDropboxIDs())),
+                          allDropboxIDs)
+
+
+    @inlineCallbacks
     def test_indexByDropboxProperty(self):
         """
         L{ICalendarHome.calendarObjectWithDropboxID} will return a calendar

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_file.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/test_file.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_file.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -495,3 +495,20 @@
         (yield self.homeUnderTest())._path.child(".foo").createDirectory()
         yield self.test_calendarObjects()
 
+
+    def test_simpleHomeSyncToken(self):
+        """
+        File store doesn't have a functioning C{resourceNamesSinceToken} for
+        L{CalendarHome}.
+        """
+
+    test_simpleHomeSyncToken.skip = "Not in file store."
+
+
+    def test_notificationSyncToken(self):
+        """
+        File store doesn't have a functioning C{resourceNamesSinceToken} for
+        L{Notifications}.
+        """
+
+    test_notificationSyncToken.skip = "Not in file store."


Property changes on: CalendarServer/trunk/txdav/caldav/datastore/test/test_index_file.py
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation/txdav/caldav/datastore/test/test_index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/caldav/datastore/test/test_index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/caldav/datastore/test/test_index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/caldav/datastore/test/test_index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/caldav/datastore/test/test_index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/caldav/datastore/test/test_index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/caldav/datastore/test/test_index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/caldav/datastore/test/test_index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/caldav/datastore/test/test_index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/caldav/datastore/test/test_index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/caldav/datastore/test/test_index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/caldav/datastore/test/test_index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/caldav/datastore/test/test_index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/caldav/datastore/test/test_index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/caldav/datastore/test/test_index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/caldav/datastore/test/test_index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/caldav/datastore/test/test_index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/caldav/datastore/test/test_index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/caldav/datastore/test/test_index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/caldav/datastore/test/test_index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/caldav/datastore/test/test_index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/caldav/datastore/test/test_index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/caldav/datastore/test/test_index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/caldav/datastore/test/test_index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/caldav/datastore/test/test_index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/caldav/datastore/test/test_index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/caldav/datastore/test/test_index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/caldav/datastore/test/test_index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/caldav/datastore/test/test_index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/test/test_index.py:6322-6394
   + /CalendarServer/branches/config-separation/txdav/caldav/datastore/test/test_index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/caldav/datastore/test/test_index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/caldav/datastore/test/test_index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/caldav/datastore/test/test_index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/caldav/datastore/test/test_index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/caldav/datastore/test/test_index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/caldav/datastore/test/test_index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/caldav/datastore/test/test_index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/caldav/datastore/test/test_index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/caldav/datastore/test/test_index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/caldav/datastore/test/test_index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/caldav/datastore/test/test_index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/caldav/datastore/test/test_index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/caldav/datastore/test/test_index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dalify/txdav/caldav/datastore/test/test_index_file.py:6932-7023
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/caldav/datastore/test/test_index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/caldav/datastore/test/test_index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/caldav/datastore/test/test_index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/caldav/datastore/test/test_index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/caldav/datastore/test/test_index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/caldav/datastore/test/test_index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/caldav/datastore/test/test_index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/caldav/datastore/test/test_index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/caldav/datastore/test/test_index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/caldav/datastore/test/test_index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/caldav/datastore/test/test_index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/caldav/datastore/test/test_index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/caldav/datastore/test/test_index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/caldav/datastore/test/test_index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/caldav/datastore/test/test_index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/test/test_index.py:6322-6394

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_scheduling.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/test_scheduling.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_scheduling.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -26,9 +26,8 @@
 scheduling or data caching as middleware in the data-store layer.
 """
 
-from twisted.trial.unittest import TestCase
-from txdav.caldav.datastore.test.common import CommonTests
-from txdav.caldav.datastore.test.test_file import setUpCalendarStore
+from twisted.trial.unittest import TestCase, SkipTest
+from txdav.caldav.datastore.test.test_file import FileStorageTests
 from txdav.caldav.datastore.scheduling import ImplicitStore
 
 simpleEvent = """BEGIN:VCALENDAR
@@ -45,7 +44,7 @@
 END:VCALENDAR
 """
 
-class ImplicitStoreTests(CommonTests, TestCase):
+class ImplicitStoreTests(FileStorageTests, TestCase):
     """
     Tests for L{ImplicitSchedulingStore}.
     """
@@ -54,6 +53,12 @@
 
     def storeUnderTest(self):
         if self.implicitStore is None:
-            setUpCalendarStore(self)
-            self.implicitStore = ImplicitStore(self.calendarStore)
+            sut = FileStorageTests.storeUnderTest(self)
+            self.implicitStore = ImplicitStore(sut)
         return self.implicitStore
+
+    def skipit(self):
+        raise SkipTest("No private attribute tests.")
+
+    test_calendarObjectsWithDotFile = skipit
+    test_init = skipit

Modified: CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/datastore/test/test_sql.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -130,7 +130,9 @@
         )._txn._store.attachmentsPath
         obj = yield self.calendarObjectUnderTest()
         hasheduid = hashlib.md5(obj._dropboxID).hexdigest()
-        attachmentPath = attachmentRoot.child(hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid).child("new.attachment")
+        attachmentPath = attachmentRoot.child(
+            hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid).child(
+                "new.attachment")
         self.assertTrue(attachmentPath.isfile())
 
 

Modified: CalendarServer/trunk/txdav/caldav/icalendarstore.py
===================================================================
--- CalendarServer/trunk/txdav/caldav/icalendarstore.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/caldav/icalendarstore.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -162,6 +162,30 @@
         """
 
 
+    def getAllDropboxIDs():
+        """
+        Retrieve all of the dropbox IDs of events in this home for calendar
+        objects which either allow attendee write access to their dropboxes,
+        have attachments to read, or both.
+
+        @return: a L{Deferred} which fires with a C{list} of all dropbox IDs (as
+            unicode strings)
+        """
+
+
+    def quotaUsedBytes():
+        """
+        The number of bytes counted towards the user's quota.
+        """
+
+
+    def adjustQuotaUsedBytes(delta):
+        """
+        Adjust the number of bytes that count towards the user's quota by
+        C{delta}, which may be negative.
+        """
+
+
+
 class ICalendar(INotifier, IShareableCollection, IDataStoreResource):
     """
     Calendar
@@ -282,6 +306,13 @@
         """
 
 
+    def resourceNamesSinceToken(revision):
+        """
+        Low-level query to gather names for calendarObjectsSinceToken.
+        """
+
+
+
 class ICalendarObject(IDataStoreResource):
     """
     Calendar object


Property changes on: CalendarServer/trunk/txdav/carddav/datastore/index_file.py
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation/txdav/carddav/datastore/index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/carddav/datastore/index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/carddav/datastore/index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/carddav/datastore/index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/carddav/datastore/index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/carddav/datastore/index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/carddav/datastore/index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/carddav/datastore/index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/carddav/datastore/index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/carddav/datastore/index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/carddav/datastore/index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/carddav/datastore/index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/carddav/datastore/index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/carddav/datastore/index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/carddav/datastore/index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/carddav/datastore/index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/carddav/datastore/index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/carddav/datastore/index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/carddav/datastore/index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/carddav/datastore/index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/carddav/datastore/index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/carddav/datastore/index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/carddav/datastore/index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/carddav/datastore/index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/carddav/datastore/index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/carddav/datastore/index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/carddav/datastore/index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/carddav/datastore/index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/carddav/datastore/index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/vcardindex.py:6322-6394
   + /CalendarServer/branches/config-separation/txdav/carddav/datastore/index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/carddav/datastore/index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/carddav/datastore/index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/carddav/datastore/index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/carddav/datastore/index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/carddav/datastore/index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/carddav/datastore/index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/carddav/datastore/index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/carddav/datastore/index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/carddav/datastore/index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/carddav/datastore/index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/carddav/datastore/index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/carddav/datastore/index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/carddav/datastore/index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dalify/txdav/carddav/datastore/index_file.py:6932-7023
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/carddav/datastore/index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/carddav/datastore/index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/carddav/datastore/index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/carddav/datastore/index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/carddav/datastore/index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/carddav/datastore/index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/carddav/datastore/index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/carddav/datastore/index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/carddav/datastore/index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/carddav/datastore/index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/carddav/datastore/index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/carddav/datastore/index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/carddav/datastore/index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/carddav/datastore/index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/carddav/datastore/index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/vcardindex.py:6322-6394

Modified: CalendarServer/trunk/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/carddav/datastore/sql.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/carddav/datastore/sql.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -47,6 +47,9 @@
 
 from txdav.common.datastore.sql import CommonHome, CommonHomeChild,\
     CommonObjectResource
+from twext.enterprise.dal.syntax import Insert
+from twext.enterprise.dal.syntax import Update
+from twext.enterprise.dal.syntax import utcNowSQL
 from txdav.common.datastore.sql_tables import ADDRESSBOOK_TABLE,\
     ADDRESSBOOK_BIND_TABLE, ADDRESSBOOK_OBJECT_REVISIONS_TABLE,\
     ADDRESSBOOK_OBJECT_TABLE, ADDRESSBOOK_HOME_TABLE,\
@@ -61,6 +64,14 @@
 
     implements(IAddressBookHome)
 
+    # structured tables.  (new, preferred)
+    _homeSchema = schema.ADDRESSBOOK_HOME
+    _bindSchema = schema.ADDRESSBOOK_BIND
+    _homeMetaDataSchema = schema.ADDRESSBOOK_HOME_METADATA
+    _revisionsSchema = schema.ADDRESSBOOK_OBJECT_REVISIONS
+    _objectSchema = schema.ADDRESSBOOK_OBJECT
+
+    # string mappings (old, removing)
     _homeTable = ADDRESSBOOK_HOME_TABLE
     _homeMetaDataTable = ADDRESSBOOK_HOME_METADATA_TABLE
     _childTable = ADDRESSBOOK_TABLE
@@ -97,7 +108,13 @@
     """
     implements(IAddressBook)
 
+    # structured tables.  (new, preferred)
     _bindSchema = schema.ADDRESSBOOK_BIND
+    _homeChildSchema = schema.ADDRESSBOOK
+    _revisionsSchema = schema.ADDRESSBOOK_OBJECT_REVISIONS
+    _objectSchema = schema.ADDRESSBOOK_OBJECT
+
+    # string mappings (old, removing)
     _bindTable = ADDRESSBOOK_BIND_TABLE
     _homeChildTable = ADDRESSBOOK_TABLE
     _homeChildBindTable = ADDRESSBOOK_AND_ADDRESSBOOK_BIND
@@ -177,6 +194,7 @@
     implements(IAddressBookObject)
 
     _objectTable = ADDRESSBOOK_OBJECT_TABLE
+    _objectSchema = schema.ADDRESSBOOK_OBJECT
 
     def __init__(self, addressbook, name, uid, resourceID=None, metadata=None):
 
@@ -207,7 +225,8 @@
 
 
     @inlineCallbacks
-    def updateDatabase(self, component, expand_until=None, reCreate=False, inserting=False):
+    def updateDatabase(self, component, expand_until=None, reCreate=False,
+                       inserting=False):
         """
         Update the database tables for the new data being written.
 
@@ -215,6 +234,8 @@
         @type component: L{Component}
         """
 
+        ao = schema.ADDRESSBOOK_OBJECT
+
         componentText = str(component)
         self._objectText = componentText
 
@@ -223,42 +244,24 @@
         self._size = len(componentText)
         if inserting:
             self._resourceID, self._created, self._modified = (
-                yield self._txn.execSQL(
-                """
-                insert into ADDRESSBOOK_OBJECT
-                (ADDRESSBOOK_RESOURCE_ID, RESOURCE_NAME, VCARD_TEXT, VCARD_UID, MD5)
-                 values
-                (%s, %s, %s, %s, %s)
-                returning
-                 RESOURCE_ID,
-                 CREATED,
-                 MODIFIED
-                """,
-                [
-                    self._addressbook._resourceID,
-                    self._name,
-                    componentText,
-                    component.resourceUID(),
-                    self._md5,
-                ]
-            ))[0]
+                yield Insert(
+                    {ao.ADDRESSBOOK_RESOURCE_ID: self._addressbook._resourceID,
+                     ao.RESOURCE_NAME: self._name,
+                     ao.VCARD_TEXT: componentText,
+                     ao.VCARD_UID: component.resourceUID(),
+                     ao.MD5: self._md5},
+                    Return=(ao.RESOURCE_ID,
+                            ao.CREATED,
+                            ao.MODIFIED)
+                ).on(self._txn))[0]
         else:
-            yield self._txn.execSQL(
-                """
-                update ADDRESSBOOK_OBJECT set
-                (VCARD_TEXT, VCARD_UID, MD5, MODIFIED)
-                 =
-                (%s, %s, %s, timezone('UTC', CURRENT_TIMESTAMP))
-                where RESOURCE_ID = %s
-                returning MODIFIED
-                """,
-                [
-                    componentText,
-                    component.resourceUID(),
-                    self._md5,
-                    self._resourceID,
-                ]
-            )
+            self._modified = (yield Update(
+                {ao.VCARD_TEXT: componentText,
+                 ao.VCARD_UID: component.resourceUID(),
+                 ao.MD5: self._md5,
+                 ao.MODIFIED: utcNowSQL},
+                Where=ao.RESOURCE_ID == self._resourceID,
+                Return=ao.MODIFIED).on(self._txn))[0][0]
 
 
     @inlineCallbacks
@@ -275,3 +278,6 @@
         The content type of Addressbook objects is text/vcard.
         """
         return MimeType.fromString("text/vcard; charset=utf-8")
+
+
+


Property changes on: CalendarServer/trunk/txdav/carddav/datastore/test/test_index_file.py
___________________________________________________________________
Modified: svn:mergeinfo
   - /CalendarServer/branches/config-separation/txdav/carddav/datastore/test/test_index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/carddav/datastore/test/test_index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/carddav/datastore/test/test_index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/carddav/datastore/test/test_index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/carddav/datastore/test/test_index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/carddav/datastore/test/test_index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/carddav/datastore/test/test_index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/carddav/datastore/test/test_index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/carddav/datastore/test/test_index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/carddav/datastore/test/test_index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/carddav/datastore/test/test_index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/carddav/datastore/test/test_index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/carddav/datastore/test/test_index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/carddav/datastore/test/test_index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/carddav/datastore/test/test_index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/carddav/datastore/test/test_index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/carddav/datastore/test/test_index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/carddav/datastore/test/test_index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/carddav/datastore/test/test_index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/carddav/datastore/test/test_index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/carddav/datastore/test/test_index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/carddav/datastore/test/test_index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/carddav/datastore/test/test_index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/carddav/datastore/test/test_index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/carddav/datastore/test/test_index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/carddav/datastore/test/test_index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/carddav/datastore/test/test_index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/carddav/datastore/test/test_index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/carddav/datastore/test/test_index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/test/test_vcardindex.py:6322-6394
   + /CalendarServer/branches/config-separation/txdav/carddav/datastore/test/test_index_file.py:4379-4443
/CalendarServer/branches/egg-info-351/txdav/carddav/datastore/test/test_index_file.py:4589-4625
/CalendarServer/branches/generic-sqlstore/txdav/carddav/datastore/test/test_index_file.py:6167-6191
/CalendarServer/branches/new-store/txdav/carddav/datastore/test/test_index_file.py:5594-5934
/CalendarServer/branches/new-store-no-caldavfile/txdav/carddav/datastore/test/test_index_file.py:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2/txdav/carddav/datastore/test/test_index_file.py:5936-5981
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692/txdav/carddav/datastore/test/test_index_file.py:5693-5702
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627/txdav/carddav/datastore/test/test_index_file.py:3628-3644
/CalendarServer/branches/users/cdaboo/more-sharing-5591/txdav/carddav/datastore/test/test_index_file.py:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464/txdav/carddav/datastore/test/test_index_file.py:4465-4957
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070/txdav/carddav/datastore/test/test_index_file.py:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187/txdav/carddav/datastore/test/test_index_file.py:5188-5440
/CalendarServer/branches/users/glyph/conn-limit/txdav/carddav/datastore/test/test_index_file.py:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge/txdav/carddav/datastore/test/test_index_file.py:4971-5080
/CalendarServer/branches/users/glyph/dalify/txdav/carddav/datastore/test/test_index_file.py:6932-7023
/CalendarServer/branches/users/glyph/dont-start-postgres/txdav/carddav/datastore/test/test_index_file.py:6592-6614
/CalendarServer/branches/users/glyph/linux-tests/txdav/carddav/datastore/test/test_index_file.py:6893-6900
/CalendarServer/branches/users/glyph/more-deferreds-6/txdav/carddav/datastore/test/test_index_file.py:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7/txdav/carddav/datastore/test/test_index_file.py:6369-6445
/CalendarServer/branches/users/glyph/sendfdport/txdav/carddav/datastore/test/test_index_file.py:5388-5424
/CalendarServer/branches/users/glyph/sharedpool/txdav/carddav/datastore/test/test_index_file.py:6490-6550
/CalendarServer/branches/users/glyph/sql-store/txdav/carddav/datastore/test/test_index_file.py:5929-6073
/CalendarServer/branches/users/glyph/use-system-twisted/txdav/carddav/datastore/test/test_index_file.py:5084-5149
/CalendarServer/branches/users/sagen/locations-resources/txdav/carddav/datastore/test/test_index_file.py:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2/txdav/carddav/datastore/test/test_index_file.py:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events/txdav/carddav/datastore/test/test_index_file.py:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038/txdav/carddav/datastore/test/test_index_file.py:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066/txdav/carddav/datastore/test/test_index_file.py:4068-4075
/CalendarServer/branches/users/sagen/resources-2/txdav/carddav/datastore/test/test_index_file.py:5084-5093
/CalendarServer/branches/users/wsanchez/transations/txdav/carddav/datastore/test/test_index_file.py:5515-5593
/CalendarServer/trunk/twistedcaldav/test/test_vcardindex.py:6322-6394

Modified: CalendarServer/trunk/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/common/datastore/sql.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -25,7 +25,6 @@
     "CommonHome",
 ]
 
-import datetime
 
 from zope.interface import implements, directlyProvides
 
@@ -50,7 +49,7 @@
 from txdav.carddav.iaddressbookstore import IAddressBookTransaction
 
 from txdav.common.datastore.sql_tables import schema
-from txdav.common.datastore.sql_tables import NOTIFICATION_HOME_TABLE, _BIND_MODE_OWN, \
+from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, \
     _BIND_STATUS_ACCEPTED, NOTIFICATION_OBJECT_REVISIONS_TABLE
 from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
     HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
@@ -62,6 +61,13 @@
 from twext.enterprise.dal.syntax import Parameter
 from twext.python.clsprop import classproperty
 from twext.enterprise.dal.syntax import Select
+from twext.enterprise.dal.syntax import Lock
+from twext.enterprise.dal.syntax import Insert
+from twext.enterprise.dal.syntax import Max
+from twext.enterprise.dal.syntax import default
+from twext.enterprise.dal.syntax import Delete
+from twext.enterprise.dal.syntax import Len
+from twext.enterprise.dal.syntax import Update
 
 from txdav.base.propertystore.base import PropertyName
 from txdav.base.propertystore.none import PropertyStore as NonePropertyStore
@@ -399,20 +405,31 @@
         for key, value in self._bindTable.iteritems():
             self._revisionBindJoinTable["BIND:%s" % (key,)] = value
 
+
+    @classproperty
+    def _resourceIDFromOwnerQuery(cls):
+        home = cls._homeSchema
+        return Select([home.RESOURCE_ID],
+                      From=home, Where=home.OWNER_UID == Parameter("ownerUID"))
+
+    @classproperty
+    def _ownerFromFromResourceID(cls):
+        home = cls._homeSchema
+        return Select([home.OWNER_UID],
+                      From=home,
+                      Where=home.RESOURCE_ID == Parameter("resourceID"))
+
     @inlineCallbacks
     def initFromStore(self, no_cache=False):
         """
-        Initialize this object from the store. We read in and cache all the extra meta-data
-        from the DB to avoid having to do DB queries for those individually later.
+        Initialize this object from the store. We read in and cache all the
+        extra meta-data from the DB to avoid having to do DB queries for those
+        individually later.
         """
-
         result = yield self._cacher.get(self._ownerUID)
         if result is None:
-            result = yield self._txn.execSQL(
-                "select %(column_RESOURCE_ID)s from %(name)s"
-                " where %(column_OWNER_UID)s = %%s" % self._homeTable,
-                [self._ownerUID]
-            )
+            result = yield self._resourceIDFromOwnerQuery.on(
+                self._txn, ownerUID=self._ownerUID)
             if result and not no_cache:
                 yield self._cacher.set(self._ownerUID, result)
 
@@ -423,10 +440,10 @@
         else:
             returnValue(None)
 
+
     @classmethod
     @inlineCallbacks
     def homeWithUID(cls, txn, uid, create=False):
-
         if txn._notifierFactory:
             notifiers = (txn._notifierFactory.newNotifier(
                 id=uid, prefix=cls._notifierPrefix
@@ -443,56 +460,44 @@
             # Need to lock to prevent race condition
 
             # FIXME: this is an entire table lock - ideally we want a row lock
-            # but the row does not exist yet. However, the "exclusive" mode
-            # does allow concurrent reads so the only thing we block is other
+            # but the row does not exist yet. However, the "exclusive" mode does
+            # allow concurrent reads so the only thing we block is other
             # attempts to provision a home, which is not too bad
-            
-            # Also note that we must not cache the owner_uid->resource_id mapping in _cacher
-            # when creating as we don't want that to appear until AFTER the commit
 
-            yield txn.execSQL(
-                "lock %(name)s in exclusive mode" % cls._homeTable,
-            )
+            # Also note that we must not cache the owner_uid->resource_id
+            # mapping in _cacher when creating as we don't want that to appear
+            # until AFTER the commit
+
+            yield Lock(cls._homeSchema, 'exclusive').on(txn)
             # Now test again
-            exists = yield txn.execSQL(
-                "select %(column_RESOURCE_ID)s from %(name)s"
-                " where %(column_OWNER_UID)s = %%s" % cls._homeTable,
-                [uid]
-            )
+            exists = yield cls._resourceIDFromOwnerQuery.on(txn, ownerUID=uid)
             if not exists:
-                resourceid = (yield txn.execSQL("""
-                    insert into %(name)s (%(column_OWNER_UID)s) values (%%s)
-                    returning %(column_RESOURCE_ID)s
-                    """ % cls._homeTable,
-                    [uid]
-                ))[0][0]
-                yield txn.execSQL(
-                    "insert into %(name)s (%(column_RESOURCE_ID)s) values (%%s)" % cls._homeMetaDataTable,
-                    [resourceid]
-                )
+                resourceid = (yield Insert(
+                    {cls._homeSchema.OWNER_UID: uid},
+                    Return=cls._homeSchema.RESOURCE_ID).on(txn))[0][0]
+                yield Insert(
+                    {cls._homeMetaDataSchema.RESOURCE_ID: resourceid}).on(txn)
             home = cls(txn, uid, notifiers)
             home = (yield home.initFromStore(no_cache=not exists))
             if not exists:
                 yield home.createdHome()
             returnValue(home)
 
+
     @classmethod
     @inlineCallbacks
     def homeUIDWithResourceID(cls, txn, rid):
-
-        rows = (yield txn.execSQL(
-            "select %(column_OWNER_UID)s from %(name)s"
-            " where %(column_RESOURCE_ID)s = %%s" % cls._homeTable,
-            [rid]
-        ))
+        rows = (yield cls._ownerFromFromResourceID.on(txn, resourceID=rid))
         if rows:
             returnValue(rows[0][0])
         else:
             returnValue(None)
 
+
     def __repr__(self):
         return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
 
+
     def uid(self):
         """
         Retrieve the unique identifier for this home.
@@ -635,24 +640,49 @@
             self._children.pop(name, None)
 
 
+    @classproperty
+    def _syncTokenQuery(cls):
+        """
+        DAL Select statement to find the sync token.
+        """
+        rev = cls._revisionsSchema
+        bind = cls._bindSchema
+        return Select(
+            [Max(rev.REVISION)],
+            From=rev, Where=(
+                rev.RESOURCE_ID.In(Select(
+                    [bind.RESOURCE_ID], From=bind,
+                    Where=bind.HOME_RESOURCE_ID == Parameter("resourceID")))
+            ).Or((rev.HOME_RESOURCE_ID == Parameter("resourceID")).And(
+                rev.RESOURCE_ID == None))
+        )
+
+
     @inlineCallbacks
     def syncToken(self):
-        revision = (yield self._txn.execSQL(
-            """
-            select max(%(REV:column_REVISION)s) from %(REV:name)s
-            where %(REV:column_RESOURCE_ID)s in (
-              select %(BIND:column_RESOURCE_ID)s from %(BIND:name)s
-              where %(BIND:column_HOME_RESOURCE_ID)s = %%s
-            ) or (
-              %(REV:column_HOME_RESOURCE_ID)s = %%s and
-              %(REV:column_RESOURCE_ID)s is null
-            )
-            """ % self._revisionBindJoinTable,
-            [self._resourceID, self._resourceID,]
-        ))[0][0]
+        revision = (yield self._syncTokenQuery.on(
+            self._txn, resourceID=self._resourceID))[0][0]
         returnValue("%s#%s" % (self._resourceID, revision))
 
 
+    @classproperty
+    def _changesQuery(cls):
+        bind = cls._bindSchema
+        rev = cls._revisionsSchema
+        return Select([bind.RESOURCE_NAME, rev.COLLECTION_NAME,
+                       rev.RESOURCE_NAME, rev.DELETED],
+                      From=rev.join(
+                          bind,
+                          (bind.HOME_RESOURCE_ID ==
+                           Parameter("resourceID")).And(
+                               rev.RESOURCE_ID ==
+                               bind.RESOURCE_ID),
+                          'left outer'),
+                      Where=(rev.REVISION > Parameter("token")).And(
+                          rev.HOME_RESOURCE_ID ==
+                          Parameter("resourceID")))
+
+
     @inlineCallbacks
     def resourceNamesSinceToken(self, token, depth):
 
@@ -663,19 +693,9 @@
                 wasdeleted
             )
             for path, collection, name, wasdeleted in
-            (yield self._txn.execSQL("""
-                select %(BIND:column_RESOURCE_NAME)s, %(REV:column_COLLECTION_NAME)s, %(REV:column_RESOURCE_NAME)s, %(REV:column_DELETED)s
-                from %(REV:name)s
-                left outer join %(BIND:name)s on (
-                  %(BIND:name)s.%(BIND:column_HOME_RESOURCE_ID)s = %%s and
-                  %(REV:name)s.%(REV:column_RESOURCE_ID)s = %(BIND:name)s.%(BIND:column_RESOURCE_ID)s
-                )
-                where
-                  %(REV:column_REVISION)s > %%s and
-                  %(REV:name)s.%(REV:column_HOME_RESOURCE_ID)s = %%s
-                """ % self._revisionBindJoinTable,
-                [self._resourceID, token, self._resourceID],
-            ))
+            (yield self._changesQuery.on(self._txn,
+                                         resourceID=self._resourceID,
+                                         token=token))
         ]
 
         deleted = []
@@ -696,22 +716,17 @@
                     changed_collections.add(path)
 
         # Now deal with shared collections
+        bind = self._bindSchema
+        rev = self._revisionsSchema
         shares = yield self.listSharedChildren()
         for sharename in shares:
             sharetoken = 0 if sharename in changed_collections else token
-            shareID = (yield self._txn.execSQL("""
-                select %(column_RESOURCE_ID)s from %(name)s
-                where
-                  %(column_RESOURCE_NAME)s = %%s and
-                  %(column_HOME_RESOURCE_ID)s = %%s and
-                  %(column_BIND_MODE)s != %%s
-                """ % self._bindTable,
-                [
-                    sharename,
-                    self._resourceID,
-                    _BIND_MODE_OWN
-                ]
-            ))[0][0]
+            shareID = (yield Select(
+                [bind.RESOURCE_ID], From=bind,
+                Where=(bind.RESOURCE_NAME == sharename).And(
+                    bind.HOME_RESOURCE_ID == self._resourceID).And(
+                        bind.BIND_MODE != _BIND_MODE_OWN)
+            ).on(self._txn))[0][0]
             results = [
                 (
                     sharename,
@@ -719,13 +734,11 @@
                     wasdeleted
                 )
                 for name, wasdeleted in
-                (yield self._txn.execSQL("""
-                    select %(column_RESOURCE_NAME)s, %(column_DELETED)s
-                    from %(name)s
-                    where %(column_REVISION)s > %%s and %(column_RESOURCE_ID)s = %%s
-                    """ % self._revisionsTable,
-                    [sharetoken, shareID],
-                )) if name
+                (yield Select([rev.RESOURCE_NAME, rev.DELETED],
+                                 From=rev,
+                                Where=(rev.REVISION > sharetoken).And(
+                                rev.RESOURCE_ID == shareID)).on(self._txn))
+                if name
             ]
 
             for path, name, wasdeleted in results:
@@ -736,7 +749,6 @@
             for path, name, wasdeleted in results:
                 changed.append("%s/%s" % (path, name,))
 
-
         changed.sort()
         deleted.sort()
         returnValue((changed, deleted))
@@ -780,83 +792,98 @@
         return None
 
 
+    @classproperty
+    def _resourceByUIDQuery(cls):
+        obj = cls._objectSchema
+        bind = cls._bindSchema
+        return Select([obj.PARENT_RESOURCE_ID, obj.RESOURCE_ID],
+                     From=obj.join(bind, obj.PARENT_RESOURCE_ID ==
+                                   bind.RESOURCE_ID),
+                     Where=(obj.UID == Parameter("uid")).And(
+                            bind.HOME_RESOURCE_ID == Parameter("resourceID")))
+
+
     @inlineCallbacks
     def objectResourcesWithUID(self, uid, ignore_children=()):
         """
-        Return all child object resources with the specified UID, ignoring any in the
-        named child collections. The file implementation just iterates all child collections.
+        Return all child object resources with the specified UID, ignoring any
+        in the named child collections.
         """
-        
         results = []
-        rows = (yield self._txn.execSQL("""
-            select %(OBJECT:name)s.%(OBJECT:column_PARENT_RESOURCE_ID)s, %(OBJECT:column_RESOURCE_ID)s
-            from %(OBJECT:name)s
-            left outer join %(BIND:name)s on (
-              %(OBJECT:name)s.%(OBJECT:column_PARENT_RESOURCE_ID)s = %(BIND:name)s.%(BIND:column_RESOURCE_ID)s
-            )
-            where
-             %(OBJECT:column_UID)s = %%s and
-             %(BIND:name)s.%(BIND:column_HOME_RESOURCE_ID)s = %%s
-            """ % self._objectBindTable,
-            [uid, self._resourceID,]
-        ))
-
+        rows = (yield self._resourceByUIDQuery.on(self._txn, uid=uid,
+                                                  resourceID=self._resourceID))
         if rows:
             for childID, objectID in rows:
                 child = (yield self.childWithID(childID))
                 if child and child.name() not in ignore_children:
                     objectResource = (yield child.objectResourceWithID(objectID))
                     results.append(objectResource)
-        
+
         returnValue(results)
 
+
+    @classproperty
+    def _quotaQuery(cls):
+        meta = cls._homeMetaDataSchema
+        return Select(
+            [meta.QUOTA_USED_BYTES], From=meta,
+            Where=meta.RESOURCE_ID == Parameter("resourceID")
+        )
+
+
     @inlineCallbacks
     def quotaUsedBytes(self):
-        
         if self._quotaUsedBytes is None:
-            self._quotaUsedBytes = (yield self._txn.execSQL(
-                "select %(column_QUOTA_USED_BYTES)s from %(name)s"
-                " where %(column_RESOURCE_ID)s = %%s" % self._homeMetaDataTable,
-                [self._resourceID]
-            ))[0][0]
-        
+            self._quotaUsedBytes = (yield self._quotaQuery.on(
+                self._txn, resourceID=self._resourceID))[0][0]
         returnValue(self._quotaUsedBytes)
 
 
+    @classproperty
+    def _preLockResourceIDQuery(cls):
+        meta = cls._homeMetaDataSchema
+        return Select(From=meta,
+                      Where=meta.RESOURCE_ID==Parameter("resourceID"),
+                      ForUpdate=True)
+
+
+    @classproperty
+    def _increaseQuotaQuery(cls):
+        meta = cls._homeMetaDataSchema
+        return Update({meta.QUOTA_USED_BYTES: meta.QUOTA_USED_BYTES +
+                       Parameter("delta")},
+                      Where=meta.RESOURCE_ID == Parameter("resourceID"),
+                      Return=meta.QUOTA_USED_BYTES)
+
+
+    @classproperty
+    def _resetQuotaQuery(cls):
+        meta = cls._homeMetaDataSchema
+        return Update({meta.QUOTA_USED_BYTES: 0},
+                      Where=meta.RESOURCE_ID == Parameter("resourceID"))
+
+
     @inlineCallbacks
     def adjustQuotaUsedBytes(self, delta):
         """
-        Adjust quota used. We need to get a lock on the row first so that the adjustment
-        is done atomically. It is import to do the 'select ... for update' because a race also
-        exists in the 'update ... x = x + 1' case as seen via unit tests.
+        Adjust quota used. We need to get a lock on the row first so that the
+        adjustment is done atomically. It is important to do the 'select ... for
+        update' because a race also exists in the 'update ... x = x + 1' case as
+        seen via unit tests.
         """
-        yield self._txn.execSQL("""
-            select * from %(name)s
-            where %(column_RESOURCE_ID)s = %%s
-            for update
-            """ % self._homeMetaDataTable,
-            [self._resourceID]
-        )
+        yield self._preLockResourceIDQuery.on(self._txn,
+                                              resourceID=self._resourceID)
 
-        self._quotaUsedBytes = (yield self._txn.execSQL("""
-            update %(name)s
-            set %(column_QUOTA_USED_BYTES)s = %(column_QUOTA_USED_BYTES)s + %%s
-            where %(column_RESOURCE_ID)s = %%s
-            returning %(column_QUOTA_USED_BYTES)s
-            """ % self._homeMetaDataTable,
-            [delta, self._resourceID]
-        ))[0][0]
+        self._quotaUsedBytes = (yield self._increaseQuotaQuery.on(
+            self._txn, delta=delta, resourceID=self._resourceID))[0][0]
 
         # Double check integrity
         if self._quotaUsedBytes < 0:
-            log.error("Fixing quota adjusted below zero to %s by change amount %s" % (self._quotaUsedBytes, delta,))
-            yield self._txn.execSQL("""
-                update %(name)s
-                set %(column_QUOTA_USED_BYTES)s = 0
-                where %(column_RESOURCE_ID)s = %%s
-                """ % self._homeMetaDataTable,
-                [self._resourceID]
-            )
+            log.error(
+                "Fixing quota adjusted below zero to %s by change amount %s" %
+                (self._quotaUsedBytes, delta,))
+            yield self._resetQuotaQuery.on(self._txn,
+                                           resourceID=self._resourceID)
             self._quotaUsedBytes = 0
 
 
@@ -864,13 +891,15 @@
         if self._notifiers is None:
             self._notifiers = ()
         self._notifiers += (notifier,)
- 
+
+
     def notifierID(self, label="default"):
         if self._notifiers:
             return self._notifiers[0].getID(label)
         else:
             return None
 
+
     @inlineCallbacks
     def nodeName(self, label="default"):
         if self._notifiers:
@@ -890,8 +919,291 @@
                 self._txn.postCommit(notifier.notify)
 
 
-class CommonHomeChild(LoggingMixIn, FancyEqMixin):
+
+class _SharedSyncLogic(object):
     """
+    Logic for maintaining sync-token shared between notification collections and
+    shared collections.
+    """
+
+    @classproperty
+    def _childSyncTokenQuery(cls):
+        """
+        DAL query for retrieving the sync token of a L{CommonHomeChild} based on
+        its resource ID.
+        """
+        rev = cls._revisionsSchema
+        return Select([Max(rev.REVISION)], From=rev,
+                      Where=rev.RESOURCE_ID == Parameter("resourceID"))
+
+
+    @inlineCallbacks
+    def syncToken(self):
+        if self._syncTokenRevision is None:
+            self._syncTokenRevision = (yield self._childSyncTokenQuery.on(
+                self._txn, resourceID=self._resourceID))[0][0]
+        returnValue(("%s#%s" % (self._resourceID, self._syncTokenRevision,)))
+
+
+    def objectResourcesSinceToken(self, token):
+        raise NotImplementedError()
+
+
+    @classproperty
+    def _objectNamesSinceRevisionQuery(cls):
+        """
+        DAL query for (resource, deleted-flag)
+        """
+        rev = cls._revisionsSchema
+        return Select([rev.RESOURCE_NAME, rev.DELETED],
+                      From=rev,
+                      Where=(rev.REVISION > Parameter("revision")).And(
+                          rev.RESOURCE_ID == Parameter("resourceID")))
+
+
+    @inlineCallbacks
+    def resourceNamesSinceToken(self, token):
+        results = [
+            (name if name else "", deleted)
+            for name, deleted in
+            (yield self._objectNamesSinceRevisionQuery.on(
+                self._txn, revision=token, resourceID=self._resourceID))
+        ]
+        results.sort(key=lambda x:x[1])
+
+        changed = []
+        deleted = []
+        for name, wasdeleted in results:
+            if name:
+                if wasdeleted:
+                    if token:
+                        deleted.append(name)
+                else:
+                    changed.append(name)
+
+        returnValue((changed, deleted))
+
+
+    @classproperty
+    def _removeDeletedRevision(cls):
+        rev = cls._revisionsSchema
+        return Delete(From=rev,
+                      Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
+                          rev.COLLECTION_NAME == Parameter("collectionName")))
+
+
+    @classproperty
+    def _addNewRevision(cls):
+        rev = cls._revisionsSchema
+        return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
+                       rev.RESOURCE_ID: Parameter("resourceID"),
+                       rev.COLLECTION_NAME: Parameter("collectionName"),
+                       rev.RESOURCE_NAME: None,
+                       # Always starts false; may be updated to be a tombstone
+                       # later.
+                       rev.DELETED: False},
+                     Return=[rev.REVISION])
+
+
+    @inlineCallbacks
+    def _initSyncToken(self):
+        yield self._removeDeletedRevision.on(
+            self._txn, homeID=self._home._resourceID, collectionName=self._name
+        )
+        self._syncTokenRevision = (yield (
+            self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
+                                    resourceID=self._resourceID,
+                                    collectionName=self._name)))[0][0]
+
+
+    @classproperty
+    def _renameSyncTokenQuery(cls):
+        """
+        DAL query to change sync token for a rename (increment and adjust
+        resource name).
+        """
+        rev = cls._revisionsSchema
+        return Update({
+            rev.REVISION: schema.REVISION_SEQ,
+            rev.COLLECTION_NAME: Parameter("name")},
+            Where=(rev.RESOURCE_ID == Parameter("resourceID")
+                  ).And(rev.RESOURCE_NAME == None),
+            Return=rev.REVISION
+        )
+
+
+    @inlineCallbacks
+    def _renameSyncToken(self):
+        self._syncTokenRevision = (yield self._renameSyncTokenQuery.on(
+            self._txn, name=self._name, resourceID=self._resourceID))[0][0]
+
+
+    @classproperty
+    def _deleteSyncTokenQuery(cls):
+        """
+        DAL query to remove the child revision entries for a collection
+        before it is marked deleted (the tombstone itself is written by the
+        removal queries below).
+        """
+        rev = cls._revisionsSchema
+        return Delete(From=rev, Where=(
+            rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
+                rev.RESOURCE_ID == Parameter("resourceID")).And(
+                rev.COLLECTION_NAME == None))
+
+
+    @classproperty
+    def _sharedRemovalQuery(cls):
+        """
+        DAL query to update the sync token for a shared collection.
+        """
+        rev = cls._revisionsSchema
+        return Update({rev.RESOURCE_ID: None,
+                       rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: True},
+                      Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
+                          rev.RESOURCE_ID == Parameter("resourceID")).And(
+                              rev.RESOURCE_NAME == None),
+                     #Return=rev.REVISION
+                     )
+
+
+    @classproperty
+    def _unsharedRemovalQuery(cls):
+        """
+        DAL query to update the sync token for an owned collection.
+        """
+        rev = cls._revisionsSchema
+        return Update({rev.RESOURCE_ID: None,
+                       rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: True},
+                      Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+                          rev.RESOURCE_NAME == None),
+                      # Return=rev.REVISION,
+                     )
+
+
+    @inlineCallbacks
+    def _deletedSyncToken(self, sharedRemoval=False):
+        # Remove all child entries
+        yield self._deleteSyncTokenQuery.on(self._txn,
+                                            homeID=self._home._resourceID,
+                                            resourceID=self._resourceID)
+
+        # If this is a share being removed then we only mark this one specific
+        # home/resource-id as being deleted.  On the other hand, if it is a
+        # non-shared collection, then we need to mark all collections
+        # with the resource-id as being deleted to account for direct shares.
+        if sharedRemoval:
+            yield self._sharedRemovalQuery.on(self._txn,
+                                              homeID=self._home._resourceID,
+                                              resourceID=self._resourceID)
+        else:
+            yield self._unsharedRemovalQuery.on(self._txn,
+                                                resourceID=self._resourceID)
+        self._syncTokenRevision = None
+
+
+    def _insertRevision(self, name):
+        return self._changeRevision("insert", name)
+
+
+    def _updateRevision(self, name):
+        return self._changeRevision("update", name)
+
+
+    def _deleteRevision(self, name):
+        return self._changeRevision("delete", name)
+
+
+    @classproperty
+    def _deleteBumpTokenQuery(cls):
+        rev = cls._revisionsSchema
+        return Update({rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: True},
+                      Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+                           rev.RESOURCE_NAME == Parameter("name")),
+                      Return=rev.REVISION)
+
+
+    @classproperty
+    def _updateBumpTokenQuery(cls):
+        rev = cls._revisionsSchema
+        return Update({rev.REVISION: schema.REVISION_SEQ},
+                      Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+                           rev.RESOURCE_NAME == Parameter("name")),
+                      Return=rev.REVISION)
+
+
+    @classproperty
+    def _insertFindPreviouslyNamedQuery(cls):
+        rev = cls._revisionsSchema
+        return Select([rev.RESOURCE_ID], From=rev,
+                      Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+                           rev.RESOURCE_NAME == Parameter("name")))
+
+
+    @classproperty
+    def _updatePreviouslyNamedQuery(cls):
+        rev = cls._revisionsSchema
+        return Update({rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: False},
+                      Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
+                           rev.RESOURCE_NAME == Parameter("name")),
+                      Return=rev.REVISION)
+
+
+    @classproperty
+    def _completelyNewRevisionQuery(cls):
+        rev = cls._revisionsSchema
+        return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
+                       rev.RESOURCE_ID: Parameter("resourceID"),
+                       rev.RESOURCE_NAME: Parameter("name"),
+                       rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: False},
+                      Return=rev.REVISION)
+
+
+    @inlineCallbacks
+    def _changeRevision(self, action, name):
+        if action == "delete":
+            self._syncTokenRevision = (
+                yield self._deleteBumpTokenQuery.on(
+                    self._txn, resourceID=self._resourceID, name=name))[0][0]
+        elif action == "update":
+            self._syncTokenRevision = (
+                yield self._updateBumpTokenQuery.on(
+                    self._txn, resourceID=self._resourceID, name=name))[0][0]
+        elif action == "insert":
+            # Note that an "insert" may happen for a resource that previously
+            # existed and then was deleted. In that case an entry in the
+            # REVISIONS table still exists so we have to detect that and do db
+            # INSERT or UPDATE as appropriate
+
+            found = bool( (
+                yield self._insertFindPreviouslyNamedQuery.on(
+                    self._txn, resourceID=self._resourceID, name=name)) )
+            if found:
+                self._syncTokenRevision = (
+                    yield self._updatePreviouslyNamedQuery.on(
+                        self._txn, resourceID=self._resourceID, name=name)
+                )[0][0]
+            else:
+                self._syncTokenRevision = (
+                    yield self._completelyNewRevisionQuery.on(
+                        self._txn, homeID=self._home._resourceID,
+                        resourceID=self._resourceID, name=name)
+                )[0][0]
+        self._maybeNotify()
+
+
+    def _maybeNotify(self):
+        """
+        Maybe notify changed.  (Overridden in NotificationCollection.)
+        """
+
+
+
+class CommonHomeChild(LoggingMixIn, FancyEqMixin, _SharedSyncLogic):
+    """
     Common ancestor class of AddressBooks and Calendars.
     """
 
@@ -902,136 +1214,168 @@
     )
 
     _objectResourceClass = None
-    _bindTable = None
-    _homeChildTable = None
-    _homeChildBindTable = None
-    _revisionsTable = None
-    _revisionsBindTable = None
-    _objectTable = None
 
+    _bindSchema           = None
+    _homeChildSchema      = None
+    _revisionsSchema      = None
+    _objectSchema         = None
+
+    _bindTable           = None
+    _homeChildTable      = None
+    _homeChildBindTable  = None
+    _revisionsTable      = None
+    _revisionsBindTable  = None
+    _objectTable         = None
+
+
     def __init__(self, home, name, resourceID, owned):
-        self._home = home
-        self._name = name
-        self._resourceID = resourceID
-        self._owned = owned
-        self._created = None
-        self._modified = None
-        self._objects = {}
-        self._objectNames = None
-        self._syncTokenRevision = None
 
         if home._notifiers:
             childID = "%s/%s" % (home.uid(), name)
-            notifiers = [notifier.clone(label="collection", id=childID) for notifier in home._notifiers]
+            notifiers = [notifier.clone(label="collection", id=childID)
+                         for notifier in home._notifiers]
         else:
             notifiers = None
-        self._notifiers = notifiers
 
-        self._index = None  # Derived classes need to set this
-        self._invites = None # Derived classes need to set this
+        self._home              = home
+        self._name              = name
+        self._resourceID        = resourceID
+        self._owned             = owned
+        self._created           = None
+        self._modified          = None
+        self._objects           = {}
+        self._objectNames       = None
+        self._syncTokenRevision = None
+        self._notifiers         = notifiers
+        self._index             = None  # Derived classes need to set this
+        self._invites           = None  # Derived classes need to set this
 
+
+    @classproperty
+    def _ownedChildListQuery(cls):
+        bind = cls._bindSchema
+        return Select([bind.RESOURCE_NAME], From=bind,
+                      Where=(bind.HOME_RESOURCE_ID ==
+                             Parameter("resourceID")).And(
+                                 bind.BIND_MODE == _BIND_MODE_OWN))
+
+
+    @classproperty
+    def _sharedChildListQuery(cls):
+        bind = cls._bindSchema
+        return Select([bind.RESOURCE_NAME], From=bind,
+                      Where=(bind.HOME_RESOURCE_ID ==
+                             Parameter("resourceID")).And(
+                                 bind.BIND_MODE != _BIND_MODE_OWN).And(
+                                 bind.RESOURCE_NAME != None))
+
+
     @classmethod
     @inlineCallbacks
     def listObjects(cls, home, owned):
         """
-        Retrieve the names of the children that exist in this home.
+        Retrieve the names of the children that exist in the given home.
 
         @return: an iterable of C{str}s.
         """
-        # FIXME: not specified on the interface or exercised by the tests, but
-        # required by clients of the implementation!
+        # FIXME: tests don't cover this as directly as they should.
         if owned:
-            rows = yield home._txn.execSQL("""
-                select %(column_RESOURCE_NAME)s from %(name)s
-                where
-                  %(column_HOME_RESOURCE_ID)s = %%s and
-                  %(column_BIND_MODE)s = %%s
-                """ % cls._bindTable,
-                [home._resourceID, _BIND_MODE_OWN]
-            )
+            rows = yield cls._ownedChildListQuery.on(
+                home._txn, resourceID=home._resourceID)
         else:
-            rows = yield home._txn.execSQL("""
-                select %(column_RESOURCE_NAME)s from %(name)s
-                where
-                  %(column_HOME_RESOURCE_ID)s = %%s and
-                  %(column_BIND_MODE)s != %%s and
-                  %(column_RESOURCE_NAME)s is not null
-                """ % cls._bindTable,
-                [home._resourceID, _BIND_MODE_OWN]
-            )
-
+            rows = yield cls._sharedChildListQuery.on(
+                home._txn, resourceID=home._resourceID)
         names = [row[0] for row in rows]
         returnValue(names)
 
+
     @classmethod
+    def _allHomeChildrenQuery(cls, owned):
+        bind = cls._bindSchema
+        child = cls._homeChildSchema
+        if owned:
+            ownedPiece = bind.BIND_MODE == _BIND_MODE_OWN
+        else:
+            ownedPiece = (bind.BIND_MODE != _BIND_MODE_OWN).And(
+                bind.RESOURCE_NAME != None)
+        return Select([child.RESOURCE_ID,
+                       bind.RESOURCE_NAME,
+                       child.CREATED,
+                       child.MODIFIED],
+                     From=child.join(
+                         bind, child.RESOURCE_ID == bind.RESOURCE_ID,
+                         'left outer'),
+                     Where=(bind.HOME_RESOURCE_ID == Parameter("resourceID")
+                           ).And(ownedPiece))
+
+
+    @classproperty
+    def _ownedHomeChildrenQuery(cls):
+        return cls._allHomeChildrenQuery(True)
+
+
+    @classproperty
+    def _sharedHomeChildrenQuery(cls):
+        return cls._allHomeChildrenQuery(False)
+
+
+    @classmethod
     @inlineCallbacks
     def loadAllObjects(cls, home, owned):
         """
-        Load all child objects and return a list of them. This must create the child classes
-        and initialize them using "batched" SQL operations to keep this constant wrt the number of
-        children. This is an optimization for Depth:1 operations on the home.
+        Load all child objects and return a list of them. This must create the
+        child classes and initialize them using "batched" SQL operations to keep
+        this constant wrt the number of children. This is an optimization for
+        Depth:1 operations on the home.
         """
-        
         results = []
 
         # Load from the main table first
         if owned:
-            ownedPiece = "%(BIND:column_BIND_MODE)s = %%s"
+            query = cls._ownedHomeChildrenQuery
         else:
-            ownedPiece = "%(BIND:column_BIND_MODE)s != %%s and %(BIND:column_RESOURCE_NAME)s is not null"
-        dataRows = (yield home._txn.execSQL(("""
-            select %(CHILD:column_RESOURCE_ID)s, %(BIND:column_RESOURCE_NAME)s, %(CHILD:column_CREATED)s, %(CHILD:column_MODIFIED)s
-            from %(CHILD:name)s
-            left outer join %(BIND:name)s on (%(CHILD:column_RESOURCE_ID)s = %(BIND:column_RESOURCE_ID)s)
-            where
-              %(BIND:column_HOME_RESOURCE_ID)s = %%s and """ + ownedPiece
-            ) % cls._homeChildBindTable,
-            [
-                home._resourceID,
-                _BIND_MODE_OWN,
-            ]
-        ))
-        
+            query = cls._sharedHomeChildrenQuery
+        dataRows = (yield query.on(home._txn, resourceID=home._resourceID))
+
         if dataRows:
             # Get property stores for all these child resources (if any found)
-            propertyStores =(yield PropertyStore.loadAll(
-                home.uid(),
-                home._txn,
-                cls._bindTable["name"],
-                cls._bindTable["column_RESOURCE_ID"],
-                cls._bindTable["column_HOME_RESOURCE_ID"],
-                home._resourceID,
+            propertyStores = (yield PropertyStore.forMultipleResources(
+                home.uid(), home._txn,
+                cls._bindSchema.RESOURCE_ID, cls._bindSchema.HOME_RESOURCE_ID,
+                home._resourceID
             ))
 
-            revisions = (yield home._txn.execSQL(("""
-                select %(REV:name)s.%(REV:column_RESOURCE_ID)s, max(%(REV:column_REVISION)s) from %(REV:name)s
-                left join %(BIND:name)s on (%(REV:name)s.%(REV:column_RESOURCE_ID)s = %(BIND:name)s.%(BIND:column_RESOURCE_ID)s)
-                where
-                  %(BIND:name)s.%(BIND:column_HOME_RESOURCE_ID)s = %%s and
-                  %(BIND:column_BIND_MODE)s """ + ("=" if owned else "!=") + """ %%s and
-                  (%(REV:column_RESOURCE_NAME)s is not null or %(REV:column_DELETED)s = FALSE)
-                group by %(REV:name)s.%(REV:column_RESOURCE_ID)s
-                """) % cls._revisionsBindTable,
-                [
-                    home._resourceID,
-                    _BIND_MODE_OWN,
-                ]
-            ))
+            bind = cls._bindSchema
+            rev = cls._revisionsSchema
+            if owned:
+                ownedCond = bind.BIND_MODE == _BIND_MODE_OWN
+            else:
+                ownedCond = bind.BIND_MODE != _BIND_MODE_OWN
+            revisions = (yield Select(
+                [rev.RESOURCE_ID, Max(rev.REVISION)],
+                From=rev.join(bind, rev.RESOURCE_ID == bind.RESOURCE_ID,
+                              'left'),
+                Where=(bind.HOME_RESOURCE_ID == home._resourceID).And(
+                    ownedCond).And(
+                        (rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
+                GroupBy=rev.RESOURCE_ID
+            ).on(home._txn))
             revisions = dict(revisions)
 
         # Create the actual objects merging in properties
-        for resource_id, resource_name, created, modified in dataRows:
-            child = cls(home, resource_name, resource_id, owned)
+        for resourceID, resource_name, created, modified in dataRows:
+            child = cls(home, resource_name, resourceID, owned)
             child._created = created
             child._modified = modified
-            child._syncTokenRevision = revisions[resource_id]
-            yield child._loadPropertyStore(propertyStores.get(resource_id, None))
+            child._syncTokenRevision = revisions[resourceID]
+            propstore = propertyStores.get(resourceID, None)
+            yield child._loadPropertyStore(propstore)
             results.append(child)
         returnValue(results)
 
 
     @classmethod
-    def _objectResourceLookup(cls, ownedPart):
+    def _homeChildLookup(cls, ownedPart):
         """
         Common portions of C{_ownedResourceIDByName} and
         C{_resourceIDSharedToHomeByName}, except for the 'owned' fragment of the
@@ -1053,7 +1397,7 @@
         resource name (C{objectName}), and a home resource ID
         (C{homeID}).
         """
-        return cls._objectResourceLookup(
+        return cls._homeChildLookup(
             cls._bindSchema.BIND_MODE == _BIND_MODE_OWN)
 
 
@@ -1064,7 +1408,7 @@
         resource name (C{objectName}), and a home resource ID
         (C{homeID}).
         """
-        return cls._objectResourceLookup(
+        return cls._homeChildLookup(
             cls._bindSchema.BIND_MODE != _BIND_MODE_OWN)
 
 
@@ -1072,11 +1416,13 @@
     @inlineCallbacks
     def objectWithName(cls, home, name, owned):
         """
-        Retrieve the child with the given C{name} contained in this
+        Retrieve the child with the given C{name} contained in the given
         C{home}.
 
         @param home: a L{CommonHome}.
-        @param name: a string.
+
+        @param name: a string; the name of the L{CommonHomeChild} to retrieve.
+
         @param owned: a boolean - whether or not to get a shared child
         @return: an L{CommonHomeChild} or C{None} if no such child
             exists.
@@ -1095,31 +1441,33 @@
         returnValue(child)
 
 
+    @classproperty
+    def _homeChildByIDQuery(cls):
+        """
+        DAL query that looks up home child names / bind modes by home child
+        resource ID and home resource ID.
+        """
+        bind = cls._bindSchema
+        return Select([bind.RESOURCE_NAME, bind.BIND_MODE],
+                      From=bind,
+                      Where=(bind.RESOURCE_ID == Parameter("resourceID")
+                            ).And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
+
+
     @classmethod
     @inlineCallbacks
     def objectWithID(cls, home, resourceID):
         """
-        Retrieve the child with the given C{resourceID} contained in this
+        Retrieve the child with the given C{resourceID} contained in the given
         C{home}.
 
         @param home: a L{CommonHome}.
         @param resourceID: a string.
-        @return: an L{CommonHomChild} or C{None} if no such child
+        @return: an L{CommonHomeChild} or C{None} if no such child
             exists.
         """
-
-        data = yield home._txn.execSQL("""
-            select %(column_RESOURCE_NAME)s, %(column_BIND_MODE)s from %(name)s
-            where
-              %(column_RESOURCE_ID)s = %%s and
-              %(column_HOME_RESOURCE_ID)s = %%s
-            """ % cls._bindTable,
-            [
-                resourceID,
-                home._resourceID,
-            ]
-        )
-
+        data = yield cls._homeChildByIDQuery.on(
+            home._txn, resourceID=resourceID, homeID=home._resourceID)
         if not data:
             returnValue(None)
         name, mode = data[0]
@@ -1127,39 +1475,49 @@
         yield child.initFromStore()
         returnValue(child)
 
+
+    @classproperty
+    def _insertDefaultHomeChild(cls):
+        """
+        DAL statement to create a home child with all default values.
+        """
+        child = cls._homeChildSchema
+        return Insert({child.RESOURCE_ID: default},
+                      Return=(child.RESOURCE_ID, child.CREATED, child.MODIFIED))
+
+
+    @classproperty
+    def _initialOwnerBind(cls):
+        """
+        DAL statement to create a bind entry for a particular home value.
+        """
+        bind = cls._bindSchema
+        return Insert({bind.HOME_RESOURCE_ID: Parameter("homeID"),
+                       bind.RESOURCE_ID: Parameter("resourceID"),
+                       bind.RESOURCE_NAME: Parameter("name"),
+                       bind.BIND_MODE: _BIND_MODE_OWN,
+                       bind.SEEN_BY_OWNER: True,
+                       bind.SEEN_BY_SHAREE: True,
+                       bind.BIND_STATUS: _BIND_STATUS_ACCEPTED})
+
+
     @classmethod
     @inlineCallbacks
     def create(cls, home, name):
-        
         child = (yield cls.objectWithName(home, name, owned=True))
-        if child:
+        if child is not None:
             raise HomeChildNameAlreadyExistsError(name)
 
         if name.startswith("."):
             raise HomeChildNameNotAllowedError(name)
-        
-        # Create and initialize (in a similar manner to initFromStore) this object
-        rows = yield home._txn.execSQL("select nextval('RESOURCE_ID_SEQ')")
-        resourceID = rows[0][0]
-        _created, _modified = (yield home._txn.execSQL("""
-            insert into %(name)s (%(column_RESOURCE_ID)s)
-            values (%%s)
-            returning %(column_CREATED)s, %(column_MODIFIED)s
-            """ % cls._homeChildTable,
-            [resourceID]
-        ))[0]
-        
+
+        # Create and initialize this object, similar to initFromStore
+        resourceID, _created, _modified = (
+            yield cls._insertDefaultHomeChild.on(home._txn))[0]
+
         # Bind table needs entry
-        yield home._txn.execSQL("""
-            insert into %(name)s (
-                %(column_HOME_RESOURCE_ID)s,
-                %(column_RESOURCE_ID)s, %(column_RESOURCE_NAME)s, %(column_BIND_MODE)s,
-                %(column_SEEN_BY_OWNER)s, %(column_SEEN_BY_SHAREE)s, %(column_BIND_STATUS)s) values (
-            %%s, %%s, %%s, %%s, %%s, %%s, %%s)
-            """ % cls._bindTable,
-            [home._resourceID, resourceID, name, _BIND_MODE_OWN, True, True,
-             _BIND_STATUS_ACCEPTED]
-        )
+        yield cls._initialOwnerBind.on(home._txn, homeID=home._resourceID,
+                                       resourceID=resourceID, name=name)
 
         # Initialize other state
         child = cls(home, name, resourceID, True)
@@ -1177,21 +1535,31 @@
         home.notifyChanged()
         returnValue(child)
 
+
+    @classproperty
+    def _datesByIDQuery(cls):
+        """
+        DAL query to retrieve created/modified dates based on a resource ID.
+        """
+        child = cls._homeChildSchema
+        return Select([child.CREATED, child.MODIFIED],
+                      From=child,
+                      Where=child.RESOURCE_ID == Parameter("resourceID"))
+
+
     @inlineCallbacks
     def initFromStore(self):
         """
-        Initialise this object from the store. We read in and cache all the extra metadata
-        from the DB to avoid having to do DB queries for those individually later.
+        Initialise this object from the store, based on its already-populated
+        resource ID. We read in and cache all the extra metadata from the DB to
+        avoid having to do DB queries for those individually later.
         """
+        self._created, self._modified = (
+            yield self._datesByIDQuery.on(self._txn,
+                                          resourceID=self._resourceID))[0]
+        yield self._loadPropertyStore()
 
-        self._created, self._modified = (yield self._txn.execSQL(
-            "select %(column_CREATED)s, %(column_MODIFIED)s from %(name)s "
-            "where %(column_RESOURCE_ID)s = %%s" % self._homeChildTable,
-            [self._resourceID]
-        ))[0]
 
-        yield self._loadPropertyStore()
-
     @property
     def _txn(self):
         return self._home._txn
@@ -1219,19 +1587,34 @@
         """
         return self._resourceID is not None
 
+
     def name(self):
         return self._name
 
 
+    @classproperty
+    def _renameQuery(cls):
+        """
+        DAL statement to rename a L{CommonHomeChild}
+        """
+        bind = cls._bindSchema
+        return Update({bind.RESOURCE_NAME: Parameter("name")},
+                      Where=(bind.RESOURCE_ID == Parameter("resourceID")).And(
+                          bind.HOME_RESOURCE_ID == Parameter("homeID")))
+
+
     @inlineCallbacks
     def rename(self, name):
+        """
+        Change the name of this L{CommonHomeChild} and update its sync token to
+        reflect that change.
+
+        @return: a L{Deferred} which fires when the modification is complete.
+        """
         oldName = self._name
-        yield self._txn.execSQL(
-            "update %(name)s set %(column_RESOURCE_NAME)s = %%s "
-            "where %(column_RESOURCE_ID)s = %%s AND "
-            "%(column_HOME_RESOURCE_ID)s = %%s" % self._bindTable,
-            [name, self._resourceID, self._home._resourceID]
-        )
+        yield self._renameQuery.on(self._txn, name=name,
+                                   resourceID=self._resourceID,
+                                   homeID=self._home._resourceID)
         self._name = name
         # update memos
         del self._home._children[oldName]
@@ -1241,45 +1624,60 @@
         self.notifyChanged()
 
 
+
+    @classproperty
+    def _deleteQuery(cls):
+        """
+        DAL statement to delete a L{CommonHomeChild} by its resource ID.
+        """
+        child = cls._homeChildSchema
+        return Delete(child, Where=child.RESOURCE_ID == Parameter("resourceID"))
+
+
     @inlineCallbacks
     def remove(self):
-
         yield self._deletedSyncToken()
-
-        yield self._txn.execSQL(
-            "delete from %(name)s where %(column_RESOURCE_ID)s = %%s" % self._homeChildTable,
-            [self._resourceID],
-            raiseOnZeroRowCount=NoSuchHomeChildError
-        )
-
+        yield self._deleteQuery.on(self._txn, NoSuchHomeChildError,
+                                   resourceID=self._resourceID)
         # Set to non-existent state
         self._resourceID = None
-        self._created = None
-        self._modified = None
-        self._objects = {}
+        self._created    = None
+        self._modified   = None
+        self._objects    = {}
 
         self.notifyChanged()
 
+
     def ownerHome(self):
         return self._home
 
+
+    @classproperty
+    def _ownerHomeFromResourceQuery(cls):
+        """
+        DAL query to retrieve the home resource ID of the owner from the bound
+        home-child ID.
+        """
+        bind = cls._bindSchema
+        return Select([bind.HOME_RESOURCE_ID],
+                     From=bind,
+                     Where=(bind.RESOURCE_ID ==
+                            Parameter("resourceID")).And(
+                                bind.BIND_MODE == _BIND_MODE_OWN))
+
+
     @inlineCallbacks
     def sharerHomeID(self):
-        
-        # If this is not shared then our home is what we want
         if self._owned:
+            # If this was loaded by its owner then we can skip the query, since
+            # we already know who the owner is.
             returnValue(self._home._resourceID)
         else:
-            rid = (yield self._txn.execSQL("""
-                select %(column_HOME_RESOURCE_ID)s from %(name)s
-                where
-                  %(column_RESOURCE_ID)s = %%s and
-                  %(column_BIND_MODE)s = %%s
-                """ % self._bindTable,
-                [self._resourceID, _BIND_MODE_OWN]
-            ))[0][0]
+            rid = (yield self._ownerHomeFromResourceQuery.on(
+                self._txn, resourceID=self._resourceID))[0][0]
             returnValue(rid)
 
+
     def setSharingUID(self, uid):
         self.properties()._setPerUserUID(uid)
 
@@ -1297,15 +1695,22 @@
         returnValue(results)
 
 
+    @classproperty
+    def _objectResourceNamesQuery(cls):
+        """
+        DAL query to load all object resource names for a home child.
+        """
+        obj = cls._objectSchema
+        return Select([obj.RESOURCE_NAME], From=obj,
+                      Where=obj.PARENT_RESOURCE_ID == Parameter('resourceID'))
+
+
     @inlineCallbacks
     def listObjectResources(self):
         if self._objectNames is None:
-            rows = yield self._txn.execSQL(
-                "select %(column_RESOURCE_NAME)s from %(name)s "
-                "where %(column_PARENT_RESOURCE_ID)s = %%s" % self._objectTable,
-                [self._resourceID])
+            rows = yield self._objectResourceNamesQuery.on(
+                self._txn, resourceID=self._resourceID)
             self._objectNames = sorted([row[0] for row in rows])
-
         returnValue(self._objectNames)
 
 
@@ -1329,16 +1734,21 @@
         else:
             return self._makeObjectResource(resourceID=resourceID)
 
+
     @inlineCallbacks
     def _makeObjectResource(self, name=None, uid=None, resourceID=None):
         """
-        We create the empty object first then have it initialize itself from the store
+        We create the empty object first then have it initialize itself from the
+        store.
         """
-        
         if resourceID:
-            objectResource = (yield self._objectResourceClass.objectWithID(self, resourceID))
+            objectResource = (
+                yield self._objectResourceClass.objectWithID(self, resourceID)
+            )
         else:
-            objectResource = (yield self._objectResourceClass.objectWithName(self, name, uid))
+            objectResource = (
+                yield self._objectResourceClass.objectWithName(self, name, uid)
+            )
         if objectResource:
             self._objects[objectResource.name()] = objectResource
             self._objects[objectResource.uid()] = objectResource
@@ -1351,6 +1761,19 @@
         returnValue(objectResource)
 
 
+    @classproperty
+    def _resourceNameForUIDQuery(cls):
+        """
+        DAL query to retrieve the resource name for an object resource based on
+        its UID column.
+        """
+        obj = cls._objectSchema
+        return Select(
+            [obj.RESOURCE_NAME], From=obj,
+            Where=(obj.UID == Parameter("uid")
+                  ).And(obj.PARENT_RESOURCE_ID == Parameter("resourceID")))
+
+
     @inlineCallbacks
     def resourceNameForUID(self, uid):
         try:
@@ -1358,20 +1781,28 @@
             returnValue(resource.name() if resource else None)
         except KeyError:
             pass
-
-        rows = yield self._txn.execSQL("""
-            select %(column_RESOURCE_NAME)s
-            from %(name)s
-            where %(column_UID)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s
-            """ % self._objectTable,
-            [uid, self._resourceID]
-        )
+        rows = yield self._resourceNameForUIDQuery.on(
+            self._txn, uid=uid, resourceID=self._resourceID)
         if rows:
             returnValue(rows[0][0])
         else:
             self._objects[uid] = None
             returnValue(None)
 
+
+    @classproperty
+    def _resourceUIDForNameQuery(cls):
+        """
+        DAL query to retrieve the UID for an object resource based on its
+        resource name column.
+        """
+        obj = cls._objectSchema
+        return Select(
+            [obj.UID], From=obj,
+            Where=(obj.UID == Parameter("name")
+                  ).And(obj.PARENT_RESOURCE_ID == Parameter("resourceID")))
+
+
     @inlineCallbacks
     def resourceUIDForName(self, name):
         try:
@@ -1379,65 +1810,80 @@
             returnValue(resource.uid() if resource else None)
         except KeyError:
             pass
-
-        rows = yield self._txn.execSQL("""
-            select %(column_UID)s
-            from %(name)s
-            where %(column_RESOURCE_NAME)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s
-            """ % self._objectTable,
-            [name, self._resourceID]
-        )
+        rows = yield self._resourceUIDForNameQuery.on(
+            self._txn, name=name, resourceID=self._resourceID)
         if rows:
             returnValue(rows[0][0])
         else:
             self._objects[name] = None
             returnValue(None)
 
+
     @inlineCallbacks
     def createObjectResourceWithName(self, name, component, metadata=None):
         """
-        Create a new resource with component data and optional metadata. We create the
-        python object using the metadata then create the actual store object with setComponent. 
+        Create a new resource with component data and optional metadata. We
+        create the python object using the metadata then create the actual store
+        object with setComponent.
         """
         if name in self._objects:
             if self._objects[name]:
                 raise ObjectResourceNameAlreadyExistsError()
 
-        objectResource = (yield self._objectResourceClass.create(self, name, component, metadata))
+        objectResource = (
+            yield self._objectResourceClass.create(self, name, component,
+                                                   metadata)
+        )
         self._objects[objectResource.name()] = objectResource
         self._objects[objectResource.uid()] = objectResource
 
-        # Note: create triggers a notification when the component is set, so we don't need to
-        # call notify( ) here like we do for object removal.
-
+        # Note: create triggers a notification when the component is set, so we
+        # don't need to call notify() here like we do for object removal.
         returnValue(objectResource)
 
+
+    @classproperty
+    def _removeObjectResourceByNameQuery(cls):
+        """
+        DAL query to remove an object resource from this collection by name,
+        returning the UID of the resource it deleted.
+        """
+        obj = cls._objectSchema
+        return Delete(From=obj,
+                      Where=(obj.RESOURCE_NAME == Parameter("name")).And(
+                          obj.PARENT_RESOURCE_ID == Parameter("resourceID")),
+                     Return=obj.UID)
+
+
     @inlineCallbacks
     def removeObjectResourceWithName(self, name):
-
-        uid = (yield self._txn.execSQL(
-            "delete from %(name)s "
-            "where %(column_RESOURCE_NAME)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s "
-            "returning %(column_UID)s" % self._objectTable,
-            [name, self._resourceID],
-            raiseOnZeroRowCount=lambda:NoSuchObjectResourceError()
-        ))[0][0]
+        uid = (yield self._removeObjectResourceByNameQuery.on(
+            self._txn, NoSuchObjectResourceError,
+            name=name, resourceID=self._resourceID))[0][0]
         self._objects.pop(name, None)
         self._objects.pop(uid, None)
         yield self._deleteRevision(name)
-
         self.notifyChanged()
 
 
+    @classproperty
+    def _removeObjectResourceByUIDQuery(cls):
+        """
+        DAL query to remove an object resource from this collection by UID,
+        returning the name of the resource it deleted.
+        """
+        obj = cls._objectSchema
+        return Delete(From=obj,
+                      Where=(obj.UID == Parameter("uid")).And(
+                          obj.PARENT_RESOURCE_ID == Parameter("resourceID")),
+                     Return=obj.RESOURCE_NAME)
+
+
     @inlineCallbacks
     def removeObjectResourceWithUID(self, uid):
-
-        name = (yield self._txn.execSQL(
-            "delete from %(name)s "
-            "where %(column_UID)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s "
-            "returning %(column_RESOURCE_NAME)s" % self._objectTable,
-            [uid, self._resourceID],
-            raiseOnZeroRowCount=lambda:NoSuchObjectResourceError()
+        name = (yield self._removeObjectResourceByUIDQuery.on(
+            self._txn, NoSuchObjectResourceError,
+            uid=uid, resourceID=self._resourceID
         ))[0][0]
         self._objects.pop(name, None)
         self._objects.pop(uid, None)
@@ -1446,199 +1892,10 @@
         self.notifyChanged()
 
 
-    @inlineCallbacks
-    def syncToken(self):
-        if self._syncTokenRevision is None:
-            self._syncTokenRevision = (yield self._txn.execSQL(
-                """
-                select max(%(column_REVISION)s) from %(name)s
-                where %(column_RESOURCE_ID)s = %%s
-                """ % self._revisionsTable,
-                [self._resourceID,]
-            ))[0][0]
-        returnValue(("%s#%s" % (self._resourceID, self._syncTokenRevision,)))
-
-
-    def objectResourcesSinceToken(self, token):
-        raise NotImplementedError()
-
-
-    @inlineCallbacks
-    def resourceNamesSinceToken(self, token):
-        results = [
-            (name if name else "", deleted)
-            for name, deleted in
-            (yield self._txn.execSQL("""
-                select %(column_RESOURCE_NAME)s, %(column_DELETED)s from %(name)s
-                where %(column_REVISION)s > %%s and %(column_RESOURCE_ID)s = %%s
-                """ % self._revisionsTable,
-                [token, self._resourceID],
-            ))
-        ]
-        results.sort(key=lambda x:x[1])
-
-        changed = []
-        deleted = []
-        for name, wasdeleted in results:
-            if name:
-                if wasdeleted:
-                    if token:
-                        deleted.append(name)
-                else:
-                    changed.append(name)
-
-        returnValue((changed, deleted))
-
-
-    @inlineCallbacks
-    def _initSyncToken(self):
-
-        # Remove any deleted revision entry that uses the same name
-        yield self._txn.execSQL("""
-            delete from %(name)s
-            where %(column_HOME_RESOURCE_ID)s = %%s and %(column_COLLECTION_NAME)s = %%s
-            """ % self._revisionsTable,
-            [self._home._resourceID, self._name]
-        )
-
-        # Insert new entry
-        self._syncTokenRevision = (yield self._txn.execSQL("""
-            insert into %(name)s
-            (%(column_HOME_RESOURCE_ID)s, %(column_RESOURCE_ID)s, %(column_COLLECTION_NAME)s, %(column_RESOURCE_NAME)s, %(column_REVISION)s, %(column_DELETED)s)
-            values (%%s, %%s, %%s, null, nextval('%(sequence)s'), FALSE)
-            returning %(column_REVISION)s
-            """ % self._revisionsTable,
-            [self._home._resourceID, self._resourceID, self._name]
-        ))[0][0]
-
-
-    @inlineCallbacks
-    def _updateSyncToken(self):
-
-        self._syncTokenRevision = (yield self._txn.execSQL("""
-            update %(name)s
-            set (%(column_REVISION)s) = (nextval('%(sequence)s'))
-            where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s is null
-            returning %(column_REVISION)s
-            """ % self._revisionsTable,
-            [self._resourceID,]
-        ))[0][0]
-
-
-    @inlineCallbacks
-    def _renameSyncToken(self):
-
-        self._syncTokenRevision = (yield self._txn.execSQL("""
-            update %(name)s
-            set (%(column_REVISION)s, %(column_COLLECTION_NAME)s) = (nextval('%(sequence)s'), %%s)
-            where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s is null
-            returning %(column_REVISION)s
-            """ % self._revisionsTable,
-            [self._name, self._resourceID,]
-        ))[0][0]
-
-
-    @inlineCallbacks
-    def _deletedSyncToken(self, sharedRemoval=False):
-
-        # Remove all child entries
-        yield self._txn.execSQL("""
-            delete from %(name)s
-            where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_ID)s = %%s and %(column_COLLECTION_NAME)s is null
-            """ % self._revisionsTable,
-            [self._home._resourceID, self._resourceID,]
-        )
-
-        # If this is a share being removed then we only mark this one specific home/resource-id as being deleted.
-        # On the other hand, if it is a non-shared collection, then we need to mark all collections
-        # with the resource-id as being deleted to account for direct shares.
-        if sharedRemoval:
-            yield self._txn.execSQL("""
-                update %(name)s
-                set (%(column_RESOURCE_ID)s, %(column_REVISION)s, %(column_DELETED)s)
-                 = (null, nextval('%(sequence)s'), TRUE)
-                where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s is null
-                returning %(column_REVISION)s
-                """ % self._revisionsTable,
-                [self._home._resourceID, self._resourceID,]
-            )
-        else:
-            yield self._txn.execSQL("""
-                update %(name)s
-                set (%(column_RESOURCE_ID)s, %(column_REVISION)s, %(column_DELETED)s)
-                 = (null, nextval('%(sequence)s'), TRUE)
-                where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s is null
-                returning %(column_REVISION)s
-                """ % self._revisionsTable,
-                [self._resourceID,]
-            )
-        self._syncTokenRevision = None
-
-
-    def _insertRevision(self, name):
-        return self._changeRevision("insert", name)
-
-    def _updateRevision(self, name):
-        return self._changeRevision("update", name)
-
-    def _deleteRevision(self, name):
-        return self._changeRevision("delete", name)
-
-
-    @inlineCallbacks
-    def _changeRevision(self, action, name):
-
-        if action == "delete":
-            self._syncTokenRevision = (yield self._txn.execSQL("""
-                update %(name)s
-                set (%(column_REVISION)s, %(column_DELETED)s) = (nextval('%(sequence)s'), TRUE)
-                where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                returning %(column_REVISION)s
-                """ % self._revisionsTable,
-                [self._resourceID, name]
-            ))[0][0]
-        elif action == "update":
-            self._syncTokenRevision = (yield self._txn.execSQL("""
-                update %(name)s
-                set (%(column_REVISION)s) = (nextval('%(sequence)s'))
-                where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                returning %(column_REVISION)s
-                """ % self._revisionsTable,
-                [self._resourceID, name]
-            ))[0][0]
-        elif action == "insert":
-            # Note that an "insert" may happen for a resource that previously existed and then
-            # was deleted. In that case an entry in the REVISIONS table still exists so we have to
-            # detect that and do db INSERT or UPDATE as appropriate
-
-            found = bool( (yield self._txn.execSQL("""
-                select %(column_RESOURCE_ID)s from %(name)s
-                where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                """ % self._revisionsTable,
-                [self._resourceID, name, ]
-            )) )
-            if found:
-                self._syncTokenRevision = (yield self._txn.execSQL("""
-                    update %(name)s
-                    set (%(column_REVISION)s, %(column_DELETED)s) = (nextval('%(sequence)s'), FALSE)
-                    where %(column_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                    returning %(column_REVISION)s
-                    """ % self._revisionsTable,
-                    [self._resourceID, name]
-                ))[0][0]
-            else:
-                self._syncTokenRevision = (yield self._txn.execSQL("""
-                    insert into %(name)s
-                    (%(column_HOME_RESOURCE_ID)s, %(column_RESOURCE_ID)s, %(column_RESOURCE_NAME)s, %(column_REVISION)s, %(column_DELETED)s)
-                    values (%%s, %%s, %%s, nextval('%(sequence)s'), FALSE)
-                    returning %(column_REVISION)s
-                    """ % self._revisionsTable,
-                    [self._home._resourceID, self._resourceID, name]
-                ))[0][0]
-
     def objectResourcesHaveProperties(self):
         return False
 
+
     @inlineCallbacks
     def _loadPropertyStore(self, props=None):
         if props is None:
@@ -1734,6 +1991,8 @@
 
     _objectTable = None
 
+    _objectSchema = None
+
     def __init__(self, parent, name, uid, resourceID=None, metadata=None):
         self._parentCollection = parent
         self._resourceID = resourceID
@@ -1746,48 +2005,55 @@
         self._objectText = None
 
 
+
+    @classproperty
+    def _allColumnsWithParent(cls):
+        obj = cls._objectSchema
+        return Select(cls._allColumns, From=obj,
+                      Where=obj.PARENT_RESOURCE_ID == Parameter("parentID"))
+
+
     @classmethod
     @inlineCallbacks
     def loadAllObjects(cls, parent):
         """
-        Load all child objects and return a list of them. This must create the child classes
-        and initialize them using "batched" SQL operations to keep this constant wrt the number of
-        children. This is an optimization for Depth:1 operations on the collection.
+        Load all child objects and return a list of them. This must create the
+        child classes and initialize them using "batched" SQL operations to keep
+        this constant wrt the number of children. This is an optimization for
+        Depth:1 operations on the collection.
         """
-        
+
         results = []
 
         # Load from the main table first
-        dataRows = yield parent._txn.execSQL(cls._selectAllColumns() + """
-            from %(name)s
-            where %(column_PARENT_RESOURCE_ID)s = %%s
-            """ % cls._objectTable,
-            [parent._resourceID,]
-        )
-        
+        dataRows = yield cls._allColumnsWithParent.on(
+            parent._txn, parentID=parent._resourceID)
+
         if dataRows:
             # Get property stores for all these child resources (if any found)
             if parent.objectResourcesHaveProperties():
-                propertyStores =(yield PropertyStore.loadAll(
+                propertyStores =(yield PropertyStore.forMultipleResources(
                     parent._home.uid(),
                     parent._txn,
-                    cls._objectTable["name"],
-                    "%s.%s" % (cls._objectTable["name"], cls._objectTable["column_RESOURCE_ID"],),
-                    "%s.%s" % (cls._objectTable["name"], cls._objectTable["column_PARENT_RESOURCE_ID"]),
-                    parent._resourceID,
+                    cls._objectSchema.RESOURCE_ID,
+                    cls._objectSchema.PARENT_RESOURCE_ID,
+                    parent._resourceID
                 ))
             else:
                 propertyStores = {}
-        
+
         # Create the actual objects merging in properties
         for row in dataRows:
             child = cls(parent, "", None)
             child._initFromRow(tuple(row))
-            yield child._loadPropertyStore(props=propertyStores.get(child._resourceID, None))
+            yield child._loadPropertyStore(
+                props=propertyStores.get(child._resourceID, None)
+            )
             results.append(child)
-        
+
         returnValue(results)
 
+
     @classmethod
     def objectWithName(cls, parent, name, uid):
         objectResource = cls(parent, name, uid, None)
@@ -1818,37 +2084,59 @@
         
         returnValue(objectResource)
 
+
+    @classmethod
+    def _allWithParentAnd(cls, column, paramName):
+        """
+        DAL query for all columns where PARENT_RESOURCE_ID matches a parentID
+        parameter and a given instance column matches a given parameter name.
+        """
+        return Select(
+            cls._allColumns, From=cls._objectSchema,
+            Where=(column == Parameter(paramName)).And(
+                cls._objectSchema.PARENT_RESOURCE_ID == Parameter("parentID"))
+        )
+
+
+    @classproperty
+    def _allWithParentAndName(cls):
+        return cls._allWithParentAnd(cls._objectSchema.RESOURCE_NAME, "name")
+
+
+    @classproperty
+    def _allWithParentAndUID(cls):
+        return cls._allWithParentAnd(cls._objectSchema.UID, "uid")
+
+
+    @classproperty
+    def _allWithParentAndID(cls):
+        return cls._allWithParentAnd(cls._objectSchema.RESOURCE_ID,
+                                     "resourceID")
+
+
     @inlineCallbacks
     def initFromStore(self):
         """
-        Initialise this object from the store. We read in and cache all the extra metadata
-        from the DB to avoid having to do DB queries for those individually later. Either the
-        name or uid is present, so we have to tweak the query accordingly.
+        Initialise this object from the store. We read in and cache all the
+        extra metadata from the DB to avoid having to do DB queries for those
+        individually later. Either the name or uid is present, so we have to
+        tweak the query accordingly.
 
         @return: L{self} if object exists in the DB, else C{None}
         """
 
         if self._name:
-            rows = yield self._txn.execSQL(self._selectAllColumns() + """
-                from %(name)s
-                where %(column_RESOURCE_NAME)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s
-                """ % self._objectTable,
-                [self._name, self._parentCollection._resourceID]
-            )
+            rows = yield self._allWithParentAndName.on(
+                self._txn, name=self._name,
+                parentID=self._parentCollection._resourceID)
         elif self._uid:
-            rows = yield self._txn.execSQL(self._selectAllColumns() + """
-                from %(name)s
-                where %(column_UID)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s
-                """ % self._objectTable,
-                [self._uid, self._parentCollection._resourceID]
-            )
+            rows = yield self._allWithParentAndUID.on(
+                self._txn, uid=self._uid,
+                parentID=self._parentCollection._resourceID)
         elif self._resourceID:
-            rows = yield self._txn.execSQL(self._selectAllColumns() + """
-                from %(name)s
-                where %(column_RESOURCE_ID)s = %%s and %(column_PARENT_RESOURCE_ID)s = %%s
-                """ % self._objectTable,
-                [self._resourceID, self._parentCollection._resourceID]
-            )
+            rows = yield self._allWithParentAndID.on(
+                self._txn, resourceID=self._resourceID,
+                parentID=self._parentCollection._resourceID)
         if rows:
             self._initFromRow(tuple(rows[0]))
             yield self._loadPropertyStore()
@@ -1856,11 +2144,13 @@
         else:
             returnValue(None)
 
+
     @classmethod
     def _selectAllColumns(cls):
         """
         Full set of columns in the object table that need to be loaded to
-        initialize the object resource state.
+        initialize the object resource state.  (XXX: remove me, old string-based
+        version, see _allColumns)
         """
         return """
             select
@@ -1873,9 +2163,28 @@
               %(column_MODIFIED)s
         """ % cls._objectTable
 
+
+    @classproperty
+    def _allColumns(cls):
+        """
+        Full set of columns in the object table that need to be loaded to
+        initialize the object resource state.
+        """
+        obj = cls._objectSchema
+        return [
+            obj.RESOURCE_ID,
+            obj.RESOURCE_NAME,
+            obj.UID,
+            obj.MD5,
+            Len(obj.TEXT),
+            obj.CREATED,
+            obj.MODIFIED
+        ]
+
+
     def _initFromRow(self, row):
         """
-        Given a select result using the columns from L{_selectAllColumns}, initialize
+        Given a select result using the columns from L{_allColumns}, initialize
         the object resource state.
         """
         (self._resourceID,
@@ -1886,6 +2195,7 @@
          self._created,
          self._modified,) = tuple(row)
 
+
     @inlineCallbacks
     def _loadPropertyStore(self, props=None, created=False):
         if props is None:
@@ -1966,14 +2276,23 @@
         return datetimeMktime(parseSQLTimestamp(self._modified))
 
 
+    @classproperty
+    def _textByIDQuery(cls):
+        """
+        DAL query to load iCalendar/vCard text via an object's resource ID.
+        """
+        obj = cls._objectSchema
+        return Select([obj.TEXT], From=obj,
+                      Where=obj.RESOURCE_ID == Parameter("resourceID"))
+
+
     @inlineCallbacks
     def text(self):
         if self._objectText is None:
-            text = (yield self._txn.execSQL(
-                "select %(column_TEXT)s from %(name)s where "
-                "%(column_RESOURCE_ID)s = %%s" % self._objectTable,
-                [self._resourceID]
-            ))[0][0]
+            text = (
+                yield self._textByIDQuery.on(self._txn,
+                                             resourceID=self._resourceID)
+            )[0][0]
             self._objectText = text
             returnValue(text)
         else:
@@ -1981,7 +2300,7 @@
 
 
 
-class NotificationCollection(LoggingMixIn, FancyEqMixin):
+class NotificationCollection(LoggingMixIn, FancyEqMixin, _SharedSyncLogic):
 
     implements(INotificationCollection)
 
@@ -1991,7 +2310,10 @@
     )
 
     _revisionsTable = NOTIFICATION_OBJECT_REVISIONS_TABLE
+    _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
+    _homeSchema = schema.NOTIFICATION_HOME
 
+
     def __init__(self, txn, uid, resourceID):
 
         self._txn = txn
@@ -2016,26 +2338,39 @@
             notifiers = None
         self._notifiers = notifiers
 
+
+    _resourceIDFromUIDQuery = Select(
+        [_homeSchema.RESOURCE_ID], From=_homeSchema,
+        Where=_homeSchema.OWNER_UID == Parameter("uid"))
+
+
+    _provisionNewNotificationsQuery = Insert(
+        {_homeSchema.OWNER_UID: Parameter("uid")},
+        Return=_homeSchema.RESOURCE_ID
+    )
+
+
+    @property
+    def _home(self):
+        """
+        L{NotificationCollection} serves as its own C{_home} for the purposes of
+        working with L{_SharedSyncLogic}.
+        """
+        return self
+
+
     @classmethod
     @inlineCallbacks
     def notificationsWithUID(cls, txn, uid):
-        """
-        Implement notificationsWithUID.
-        """
-        rows = yield txn.execSQL(
-            """
-            select %(column_RESOURCE_ID)s from %(name)s where
-            %(column_OWNER_UID)s = %%s
-            """ % NOTIFICATION_HOME_TABLE, [uid]
-        )
+        rows = yield cls._resourceIDFromUIDQuery.on(txn, uid=uid)
+
         if rows:
             resourceID = rows[0][0]
             created = False
         else:
-            resourceID = str((yield txn.execSQL(
-                "insert into %(name)s (%(column_OWNER_UID)s) values (%%s) returning %(column_RESOURCE_ID)s" % NOTIFICATION_HOME_TABLE,
-                [uid]
-            ))[0][0])
+            resourceID = str((
+                yield cls._provisionNewNotificationsQuery.on(txn, uid=uid)
+            )[0][0])
             created = True
         collection = cls(txn, uid, resourceID)
         yield collection._loadPropertyStore()
@@ -2043,6 +2378,7 @@
             yield collection._initSyncToken()
         returnValue(collection)
 
+
     @inlineCallbacks
     def _loadPropertyStore(self):
         self._propertyStore = yield PropertyStore.load(
@@ -2055,15 +2391,19 @@
     def resourceType(self):
         return ResourceType.notification #@UndefinedVariable
 
+
     def retrieveOldIndex(self):
         return PostgresLegacyNotificationsEmulator(self)
 
+
     def __repr__(self):
         return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
 
+
     def name(self):
         return "notification"
 
+
     def uid(self):
         return self._uid
 
@@ -2077,13 +2417,17 @@
         returnValue(results)
 
 
+    _notificationUIDsForHomeQuery = Select(
+        [schema.NOTIFICATION.NOTIFICATION_UID], From=schema.NOTIFICATION,
+        Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID ==
+        Parameter("resourceID"))
+
+
     @inlineCallbacks
     def listNotificationObjects(self):
         if self._notificationNames is None:
-            rows = yield self._txn.execSQL(
-                "select (NOTIFICATION_UID) from NOTIFICATION "
-                "where NOTIFICATION_HOME_RESOURCE_ID = %s",
-                [self._resourceID])
+            rows = yield self._notificationUIDsForHomeQuery.on(
+                self._txn, resourceID=self._resourceID)
             self._notificationNames = sorted([row[0] for row in rows])
         returnValue(self._notificationNames)
 
@@ -2104,9 +2448,9 @@
     @inlineCallbacks
     def notificationObjectWithUID(self, uid):
         """
-        We create the empty object first then have it initialize itself from the store
+        Create an empty notification object first then have it initialize itself
+        from the store.
         """
-
         no = NotificationObject(self, uid)
         no = (yield no.initFromStore())
         returnValue(no)
@@ -2131,150 +2475,53 @@
         return self.removeNotificationObjectWithUID(self._nameToUID(name))
 
 
+    _removeByUIDQuery = Delete(
+        From=schema.NOTIFICATION,
+        Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter("uid")).And(
+            schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID
+            == Parameter("resourceID")))
+
+
     @inlineCallbacks
     def removeNotificationObjectWithUID(self, uid):
-        yield self._txn.execSQL(
-            "delete from NOTIFICATION "
-            "where NOTIFICATION_UID = %s and NOTIFICATION_HOME_RESOURCE_ID = %s",
-            [uid, self._resourceID]
-        )
+        yield self._removeByUIDQuery.on(
+            self._txn, uid=uid, resourceID=self._resourceID)
         self._notifications.pop(uid, None)
         yield self._deleteRevision("%s.xml" % (uid,))
 
 
-    @inlineCallbacks
-    def _initSyncToken(self):
-        self._syncTokenRevision = (yield self._txn.execSQL("""
-            insert into %(name)s
-            (%(column_HOME_RESOURCE_ID)s, %(column_RESOURCE_NAME)s, %(column_REVISION)s, %(column_DELETED)s)
-            values (%%s, null, nextval('%(sequence)s'), FALSE)
-            returning %(column_REVISION)s
-            """ % self._revisionsTable,
-            [self._resourceID,]
-        ))[0][0]
+    _initSyncTokenQuery = Insert(
+        {
+            _revisionsSchema.HOME_RESOURCE_ID : Parameter("resourceID"),
+            _revisionsSchema.RESOURCE_NAME    : None,
+            _revisionsSchema.REVISION         : schema.REVISION_SEQ,
+            _revisionsSchema.DELETED          : False
+        }, Return=_revisionsSchema.REVISION
+    )
 
 
     @inlineCallbacks
-    def syncToken(self):
-        if self._syncTokenRevision is None:
-            self._syncTokenRevision = (yield self._txn.execSQL(
-                """
-                select max(%(column_REVISION)s) from %(name)s
-                where %(column_HOME_RESOURCE_ID)s = %%s
-                """ % self._revisionsTable,
-                [self._resourceID,]
-            ))[0][0]
-        returnValue("%s#%s" % (self._resourceID, self._syncTokenRevision,))
+    def _initSyncToken(self):
+        self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
+            self._txn, resourceID=self._resourceID))[0][0]
 
 
-    def objectResourcesSinceToken(self, token):
-        raise NotImplementedError()
+    _syncTokenQuery = Select(
+        [Max(_revisionsSchema.REVISION)], From=_revisionsSchema,
+        Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter("resourceID")
+    )
 
 
     @inlineCallbacks
-    def resourceNamesSinceToken(self, token):
-        results = [
-            (name if name else "", deleted)
-            for name, deleted in
-            (yield self._txn.execSQL("""
-                select %(column_RESOURCE_NAME)s, %(column_DELETED)s from %(name)s
-                where %(column_REVISION)s > %%s and %(column_HOME_RESOURCE_ID)s = %%s
-                """ % self._revisionsTable,
-                [token, self._resourceID],
-            ))
-        ]
-        results.sort(key=lambda x:x[1])
+    def syncToken(self):
+        if self._syncTokenRevision is None:
+            self._syncTokenRevision = (
+                yield self._syncTokenQuery.on(
+                    self._txn, resourceID=self._resourceID)
+            )[0][0]
+        returnValue("%s#%s" % (self._resourceID, self._syncTokenRevision))
 
-        changed = []
-        deleted = []
-        for name, wasdeleted in results:
-            if name:
-                if wasdeleted:
-                    if token:
-                        deleted.append(name)
-                else:
-                    changed.append(name)
 
-        returnValue((changed, deleted))
-
-
-    def _updateSyncToken(self):
-        self._syncTokenRevision =  self._txn.execSQL("""
-            update %(name)s
-            set (%(column_REVISION)s) = (nextval('%(sequence)s'))
-            where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s is null
-            returning %(column_REVISION)s
-            """ % self._revisionsTable,
-            [self._resourceID,]
-        )[0][0]
-
-
-    def _insertRevision(self, name):
-        return self._changeRevision("insert", name)
-
-
-    def _updateRevision(self, name):
-        return self._changeRevision("update", name)
-
-
-    def _deleteRevision(self, name):
-        return self._changeRevision("delete", name)
-
-
-    @inlineCallbacks
-    def _changeRevision(self, action, name):
-
-        if action == "delete":
-            self._syncTokenRevision = (yield self._txn.execSQL("""
-                update %(name)s
-                set (%(column_REVISION)s, %(column_DELETED)s) = (nextval('%(sequence)s'), TRUE)
-                where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                returning %(column_REVISION)s
-                """ % self._revisionsTable,
-                [self._resourceID, name]
-            ))[0][0]
-        elif action == "update":
-            self._syncTokenRevision = (yield self._txn.execSQL("""
-                update %(name)s
-                set (%(column_REVISION)s) = (nextval('%(sequence)s'))
-                where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                returning %(column_REVISION)s
-                """ % self._revisionsTable,
-                [self._resourceID, name]
-            ))[0][0]
-        elif action == "insert":
-            # Note that an "insert" may happen for a resource that previously existed and then
-            # was deleted. In that case an entry in the REVISIONS table still exists so we have to
-            # detect that and do db INSERT or UPDATE as appropriate
-
-            found = bool( (yield self._txn.execSQL("""
-                select %(column_HOME_RESOURCE_ID)s from %(name)s
-                where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                """ % self._revisionsTable,
-                [self._resourceID, name, ]
-            )))
-            if found:
-                self._syncTokenRevision = (yield self._txn.execSQL("""
-                    update %(name)s
-                    set (%(column_REVISION)s, %(column_DELETED)s) = (nextval('%(sequence)s'), FALSE)
-                    where %(column_HOME_RESOURCE_ID)s = %%s and %(column_RESOURCE_NAME)s = %%s
-                    returning %(column_REVISION)s
-                    """ % self._revisionsTable,
-                    [self._resourceID, name]
-                ))[0][0]
-            else:
-                self._syncTokenRevision = (yield self._txn.execSQL("""
-                    insert into %(name)s
-                    (%(column_HOME_RESOURCE_ID)s, %(column_RESOURCE_NAME)s, %(column_REVISION)s, %(column_DELETED)s)
-                    values (%%s, %%s, nextval('%(sequence)s'), FALSE)
-                    returning %(column_REVISION)s
-                    """ % self._revisionsTable,
-                    [self._resourceID, name,]
-                ))[0][0]
-
-        self.notifyChanged()
-
-
     def properties(self):
         return self._propertyStore
 
@@ -2285,6 +2532,7 @@
         else:
             return None
 
+
     @inlineCallbacks
     def nodeName(self, label="default"):
         if self._notifiers:
@@ -2295,6 +2543,7 @@
         else:
             returnValue(None)
 
+
     def notifyChanged(self):
         """
         Trigger a notification of a change
@@ -2304,6 +2553,25 @@
                 self._txn.postCommit(notifier.notify)
 
 
+    @classproperty
+    def _completelyNewRevisionQuery(cls):
+        rev = cls._revisionsSchema
+        return Insert({rev.HOME_RESOURCE_ID: Parameter("homeID"),
+                       # rev.RESOURCE_ID: Parameter("resourceID"),
+                       rev.RESOURCE_NAME: Parameter("name"),
+                       rev.REVISION: schema.REVISION_SEQ,
+                       rev.DELETED: False},
+                      Return=rev.REVISION)
+
+
+    def _maybeNotify(self):
+        """
+        Emit a push notification after C{_changeRevision}.
+        """
+        self.notifyChanged()
+
+
+
 class NotificationObject(LoggingMixIn, FancyEqMixin):
 
     implements(INotificationObject)
@@ -2313,6 +2581,8 @@
         "_home",
     )
 
+    _objectSchema = schema.NOTIFICATION
+
     def __init__(self, home, uid):
         self._home = home
         self._resourceID = None
@@ -2324,47 +2594,52 @@
         self._xmlType = None
         self._objectText = None
 
+
     def __repr__(self):
         return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
 
+
+    @classproperty
+    def _allColumnsByHomeIDQuery(cls):
+        """
+        DAL query to load all columns by home ID.
+        """
+        obj = cls._objectSchema
+        return Select([obj.RESOURCE_ID, obj.NOTIFICATION_UID, obj.MD5,
+                       Len(obj.XML_DATA), obj.XML_TYPE, obj.CREATED,
+                       obj.MODIFIED],
+                      From=obj,
+                      Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter(
+                          "homeID")))
+
+
     @classmethod
     @inlineCallbacks
     def loadAllObjects(cls, parent):
         """
-        Load all child objects and return a list of them. This must create the child classes
-        and initialize them using "batched" SQL operations to keep this constant wrt the number of
-        children. This is an optimization for Depth:1 operations on the collection.
+        Load all child objects and return a list of them. This must create the
+        child classes and initialize them using "batched" SQL operations to keep
+        this constant wrt the number of children. This is an optimization for
+        Depth:1 operations on the collection.
         """
-        
+
         results = []
 
         # Load from the main table first
-        dataRows = (yield parent._txn.execSQL("""
-            select
-                RESOURCE_ID,
-                NOTIFICATION_UID,
-                MD5,
-                character_length(XML_DATA),
-                XML_TYPE,
-                CREATED,
-                MODIFIED
-            from NOTIFICATION
-            where NOTIFICATION_HOME_RESOURCE_ID = %s
-            """,
-            [parent._resourceID]
-        ))
-        
+        dataRows = (
+            yield cls._allColumnsByHomeIDQuery.on(parent._txn,
+                                                  homeID=parent._resourceID))
+
         if dataRows:
             # Get property stores for all these child resources (if any found)
-            propertyStores =(yield PropertyStore.loadAll(
+            propertyStores =(yield PropertyStore.forMultipleResources(
                 parent.uid(),
                 parent._txn,
-                "NOTIFICATION",
-                "NOTIFICATION.RESOURCE_ID",
-                "NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID",
+                schema.NOTIFICATION.RESOURCE_ID,
+                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
                 parent._resourceID,
             ))
-        
+
         # Create the actual objects merging in properties
         for row in dataRows:
             child = cls(parent, None)
@@ -2375,31 +2650,43 @@
              child._xmlType,
              child._created,
              child._modified,) = tuple(row)
-            child._loadPropertyStore(props=propertyStores.get(child._resourceID, None))
+            child._loadPropertyStore(
+                props=propertyStores.get(child._resourceID, None)
+            )
             results.append(child)
-        
+
         returnValue(results)
 
+
+    @classproperty
+    def _oneNotificationQuery(cls):
+        no = cls._objectSchema
+        return Select(
+            [
+                no.RESOURCE_ID,
+                no.MD5,
+                Len(no.XML_DATA),
+                no.XML_TYPE,
+                no.CREATED,
+                no.MODIFIED
+            ],
+            From=no,
+            Where=(no.NOTIFICATION_UID ==
+                   Parameter("uid")).And(no.NOTIFICATION_HOME_RESOURCE_ID ==
+                                         Parameter("homeID")))
+
+
     @inlineCallbacks
     def initFromStore(self):
         """
-        Initialise this object from the store. We read in and cache all the extra metadata
-        from the DB to avoid having to do DB queries for those individually later.
+        Initialise this object from the store, based on its UID and home
+        resource ID. We read in and cache all the extra metadata from the DB to
+        avoid having to do DB queries for those individually later.
 
         @return: L{self} if object exists in the DB, else C{None}
         """
-        rows = (yield self._txn.execSQL("""
-            select
-                RESOURCE_ID,
-                MD5,
-                character_length(XML_DATA),
-                XML_TYPE,
-                CREATED,
-                MODIFIED
-            from NOTIFICATION
-            where NOTIFICATION_UID = %s and NOTIFICATION_HOME_RESOURCE_ID = %s
-            """,
-            [self._uid, self._home._resourceID]))
+        rows = (yield self._oneNotificationQuery.on(
+            self._txn, uid=self._uid, homeID=self._home._resourceID))
         if rows:
             (self._resourceID,
              self._md5,
@@ -2412,6 +2699,7 @@
         else:
             returnValue(None)
 
+
     def _loadPropertyStore(self, props=None, created=False):
         if props is None:
             props = NonePropertyStore(self._home.uid())
@@ -2439,6 +2727,36 @@
         return self.uid() + ".xml"
 
 
+    @classproperty
+    def _newNotificationQuery(cls):
+        no = cls._objectSchema
+        return Insert(
+            {
+                no.NOTIFICATION_HOME_RESOURCE_ID: Parameter("homeID"),
+                no.NOTIFICATION_UID: Parameter("uid"),
+                no.XML_TYPE: Parameter("xmlType"),
+                no.XML_DATA: Parameter("xmlData"),
+                no.MD5: Parameter("md5"),
+            },
+            Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED]
+        )
+
+
+    @classproperty
+    def _updateNotificationQuery(cls):
+        no = cls._objectSchema
+        return Update(
+            {
+                no.XML_TYPE: Parameter("xmlType"),
+                no.XML_DATA: Parameter("xmlData"),
+                no.MD5: Parameter("md5"),
+            },
+            Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")).And(
+                no.NOTIFICATION_UID == Parameter("uid")),
+            Return=no.MODIFIED
+        )
+
+
     @inlineCallbacks
     def setData(self, uid, xmltype, xmldata, inserting=False):
         """
@@ -2449,48 +2767,32 @@
         self._md5 = hashlib.md5(xmldata).hexdigest()
         self._size = len(xmldata)
         if inserting:
-            rows = yield self._txn.execSQL("""
-                insert into NOTIFICATION
-                  (NOTIFICATION_HOME_RESOURCE_ID, NOTIFICATION_UID, XML_TYPE, XML_DATA, MD5)
-                values
-                  (%s, %s, %s, %s, %s)
-                returning
-                  RESOURCE_ID,
-                  CREATED,
-                  MODIFIED
-                """,
-                [self._home._resourceID, uid, self._xmlType.toxml(), xmldata, self._md5]
+            rows = yield self._newNotificationQuery.on(
+                self._txn, homeID=self._home._resourceID, uid=uid,
+                xmlType=self._xmlType.toxml(), xmlData=xmldata, md5=self._md5
             )
             self._resourceID, self._created, self._modified = rows[0]
             self._loadPropertyStore()
         else:
-            rows = yield self._txn.execSQL("""
-                update NOTIFICATION
-                set XML_TYPE = %s, XML_DATA = %s, MD5 = %s
-                where NOTIFICATION_HOME_RESOURCE_ID = %s and NOTIFICATION_UID = %s
-                returning MODIFIED
-                """,
-                [self._xmlType.toxml(), xmldata, self._md5, self._home._resourceID, uid])
+            rows = yield self._updateNotificationQuery.on(
+                self._txn, homeID=self._home._resourceID, uid=uid,
+                xmlType=self._xmlType.toxml(), xmlData=xmldata, md5=self._md5
+            )
             self._modified = rows[0][0]
-        
         self._objectText = xmldata
 
 
-    @inlineCallbacks
-    def _fieldQuery(self, field):
-        data = yield self._txn.execSQL(
-            "select " + field + " from NOTIFICATION "
-            "where RESOURCE_ID = %s",
-            [self._resourceID]
-        )
-        returnValue(data[0][0])
+    _xmlDataFromID = Select(
+        [_objectSchema.XML_DATA], From=_objectSchema,
+        Where=_objectSchema.RESOURCE_ID == Parameter("resourceID"))
 
 
     @inlineCallbacks
     def xmldata(self):
-        
         if self._objectText is None:
-            self._objectText = (yield self._fieldQuery("XML_DATA"))
+            self._objectText = (
+                yield self._xmlDataFromID.on(
+                    self._txn, resourceID=self._resourceID))[0][0]
         returnValue(self._objectText)
 
 

Modified: CalendarServer/trunk/txdav/common/datastore/sql_legacy.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_legacy.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/common/datastore/sql_legacy.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -70,7 +70,7 @@
                 NotificationRecord(
                     notificationObject.uid(),
                     notificationObject.name(),
-                    (yield notificationObject._fieldQuery("XML_TYPE"))
+                    (yield notificationObject.xmlType().toxml())
                 )
             )
         else:

Modified: CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/common/datastore/sql_schema_v1.sql	2011-02-16 15:44:03 UTC (rev 7024)
@@ -416,7 +416,7 @@
   CALENDAR_RESOURCE_ID      integer      references CALENDAR,
   CALENDAR_NAME             varchar(255) default null,
   RESOURCE_NAME             varchar(255),
-  REVISION                  integer      not null,
+  REVISION                  integer      default nextval('REVISION_SEQ') not null,
   DELETED                   boolean      not null,
 
   unique(CALENDAR_RESOURCE_ID, RESOURCE_NAME)
@@ -439,7 +439,7 @@
   ADDRESSBOOK_RESOURCE_ID      integer      references ADDRESSBOOK,
   ADDRESSBOOK_NAME             varchar(255) default null,
   RESOURCE_NAME                varchar(255),
-  REVISION                     integer      not null,
+  REVISION                     integer      default nextval('REVISION_SEQ') not null,
   DELETED                      boolean      not null,
 
   unique(ADDRESSBOOK_RESOURCE_ID, RESOURCE_NAME)
@@ -458,7 +458,7 @@
 create table NOTIFICATION_OBJECT_REVISIONS (
   NOTIFICATION_HOME_RESOURCE_ID integer      not null references NOTIFICATION_HOME on delete cascade,
   RESOURCE_NAME                 varchar(255),
-  REVISION                      integer      not null,
+  REVISION                      integer      default nextval('REVISION_SEQ') not null,
   DELETED                       boolean      not null,
 
   unique(NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME)

Modified: CalendarServer/trunk/txdav/common/datastore/sql_tables.py
===================================================================
--- CalendarServer/trunk/txdav/common/datastore/sql_tables.py	2011-02-16 15:10:23 UTC (rev 7023)
+++ CalendarServer/trunk/txdav/common/datastore/sql_tables.py	2011-02-16 15:44:03 UTC (rev 7024)
@@ -66,6 +66,8 @@
     schema.ADDRESSBOOK_OBJECT_REVISIONS.ADDRESSBOOK_NAME
 schema.NOTIFICATION_OBJECT_REVISIONS.HOME_RESOURCE_ID = \
     schema.NOTIFICATION_OBJECT_REVISIONS.NOTIFICATION_HOME_RESOURCE_ID
+schema.NOTIFICATION_OBJECT_REVISIONS.RESOURCE_ID = \
+    schema.NOTIFICATION_OBJECT_REVISIONS.NOTIFICATION_HOME_RESOURCE_ID
 schema.CALENDAR_OBJECT.TEXT = \
     schema.CALENDAR_OBJECT.ICALENDAR_TEXT
 schema.CALENDAR_OBJECT.UID = \
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20110216/90d2a56d/attachment-0001.html>


More information about the calendarserver-changes mailing list