[CalendarServer-changes] [12041] CalendarServer/branches/users/cdaboo/cross-pod-sharing
source_changes at macosforge.org
source_changes at macosforge.org
Wed Mar 12 11:15:45 PDT 2014
Revision: 12041
http://trac.calendarserver.org//changeset/12041
Author: cdaboo at apple.com
Date: 2013-12-06 13:43:35 -0800 (Fri, 06 Dec 2013)
Log Message:
-----------
Initial work on cross-pod sharing. This supports the round-tripping of invites across pods with appropriate
bind table entries so that shared collections appear on the sharee side. Access to actual data in the shared
resource is the next thing to be done.
Modified Paths:
--------------
CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/caldav.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/util.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/conf/caldavd-test-podB.plist
CalendarServer/branches/users/cdaboo/cross-pod-sharing/twext/web2/http.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/twisted/plugins/caldav.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/twistedcaldav/stdconfig.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/subpostgres.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/util.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/scheduling/ischedule/localservers.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/sql.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/icalendardirectoryservice.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/carddav/datastore/sql.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current.sql
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_tables.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/icommondatastore.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/idirectoryservice.py
Added Paths:
-----------
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/__init__.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/conduit.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/request.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/resource.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/__init__.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_conduit.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_external_home.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_resource.py
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/oracle-dialect/v29.sql
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/postgres-dialect/v29.sql
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_29_to_30.sql
CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_29_to_30.sql
Property Changed:
----------------
CalendarServer/branches/users/cdaboo/cross-pod-sharing/
Property changes on: CalendarServer/branches/users/cdaboo/cross-pod-sharing
___________________________________________________________________
Modified: svn:mergeinfo
- /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
+ /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/release/CalendarServer-5.1-dev:11846
/CalendarServer/branches/release/CalendarServer-5.2-dev:11972
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11607-11871
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/json:11622-11912
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/performance-tweaks:11824-11836
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/reverse-proxy-pods:11875-11900
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/sharing-in-the-store:11935-12016
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/log-cleanups:11691-11731
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/whenNotProposed:11881-11897
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/caldav.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/caldav.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -1691,11 +1691,9 @@
raise StoreNotAvailable()
from twisted.internet import reactor
- pool = PeerConnectionPool(reactor, store.newTransaction,
- 7654, schema)
+ pool = PeerConnectionPool(reactor, store.newTransaction, config.WorkQueue.ampPort, schema)
store.queuer = store.queuer.transferProposalCallbacks(pool)
- controlSocket.addFactory(_QUEUE_ROUTE,
- pool.workerListenerFactory())
+ controlSocket.addFactory(_QUEUE_ROUTE, pool.workerListenerFactory())
# TODO: now that we have the shared control socket, we should get
# rid of the connection dispenser and make a shared / async
# connection pool implementation that can dispense transactions
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/util.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/calendarserver/tap/util.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -92,6 +92,7 @@
from calendarserver.webadmin.resource import WebAdminResource
from calendarserver.webcal.resource import WebCalendarResource
+from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.sql import current_sql_schema
@@ -407,6 +408,7 @@
rootResourceClass = RootResource
calendarResourceClass = DirectoryCalendarHomeProvisioningResource
iScheduleResourceClass = IScheduleInboxResource
+ conduitResourceClass = ConduitResource
timezoneServiceResourceClass = TimezoneServiceResource
timezoneStdServiceResourceClass = TimezoneStdServiceResource
webCalendarResourceClass = WebCalendarResource
@@ -636,7 +638,7 @@
addSystemEventTrigger("after", "startup", timezoneStdService.onStartup)
#
- # iSchedule service for podding
+ # iSchedule/cross-pod service for podding
#
if config.Servers.Enabled:
log.info("Setting up iSchedule podding inbox resource: {cls}", cls=iScheduleResourceClass)
@@ -648,6 +650,14 @@
)
root.putChild(config.Servers.InboxName, ischedule)
+ log.info("Setting up podding conduit resource: {cls}", cls=conduitResourceClass)
+
+ conduit = conduitResourceClass(
+ root,
+ newStore,
+ )
+ root.putChild(config.Servers.ConduitName, conduit)
+
#
# iSchedule service (not used for podding)
#
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/conf/caldavd-test-podB.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/conf/caldavd-test-podB.plist 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/conf/caldavd-test-podB.plist 2013-12-06 21:43:35 UTC (rev 12041)
@@ -43,6 +43,13 @@
<array>
</array>
+ <!-- Work Queue -->
+ <key>WorkQueue</key>
+ <dict>
+ <key>ampPort</key>
+ <integer>7655</integer>
+ </dict>
+
<!-- Server root -->
<key>ServerRoot</key>
<string>./data/podB</string>
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/twext/web2/http.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/twext/web2/http.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/twext/web2/http.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -558,7 +558,7 @@
"""
def __init__(self, code, jobj):
"""
- @param xml_responses: an iterable of davxml.Response objects.
+ @param jobj: a Python object that can be serialized to JSON.
"""
Response.__init__(self, code, stream=json.dumps(jobj))
self.headers.setHeader("content-type", http_headers.MimeType("application", "json"))
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/twisted/plugins/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/twisted/plugins/caldav.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/twisted/plugins/caldav.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -25,7 +25,6 @@
from twisted.internet.protocol import Factory
Factory.noisy = False
-
def serviceMakerProperty(propname):
def getProperty(self):
return getattr(reflect.namedClass(self.serviceMakerClass), propname)
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/twistedcaldav/stdconfig.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/twistedcaldav/stdconfig.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -317,6 +317,13 @@
# upgrade.
#
+ # Work queue configuration information
+ #
+ "WorkQueue" : {
"ampPort": 7654, # Port used for hosts in a cluster to talk to each other
+ },
+
+ #
# Types of service provided
#
"EnableCalDAV" : True, # Enable CalDAV service
@@ -360,7 +367,7 @@
#
# Directory service
#
- # A directory service provides information about principals (eg.
+ # A directory service provides information about principals (e.g.
# users, groups, locations and resources) to the server.
#
"DirectoryService": {
@@ -821,8 +828,9 @@
"Servers" : {
"Enabled": False, # Multiple servers enabled or not
"ConfigFile": "localservers.xml", # File path for server information
- "MaxClients": 5, # Pool size for connections to between servers
+ "MaxClients": 5, # Pool size for connections between servers
"InboxName": "podding", # Name for top-level inbox resource
+ "ConduitName": "conduit", # Name for top-level cross-pod resource
},
#
@@ -1063,8 +1071,11 @@
def _loadImport(childDict):
# Look for an import and read that one as the main config and merge the current one into that
if "ImportConfig" in childDict and childDict.ImportConfig:
- configRoot = os.path.join(childDict.ServerRoot, childDict.ConfigRoot)
- path = _expandPath(fullServerPath(configRoot, childDict.ImportConfig))
+ if childDict.ImportConfig[0] != ".":
+ configRoot = os.path.join(childDict.ServerRoot, childDict.ConfigRoot)
+ path = _expandPath(fullServerPath(configRoot, childDict.ImportConfig))
+ else:
+ path = childDict.ImportConfig
if os.path.exists(path):
importDict = ConfigDict(self._parseConfigFromFile(path))
if importDict:
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/subpostgres.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/subpostgres.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/subpostgres.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -228,7 +228,7 @@
# in /tmp and based on a hash of the data store directory
digest = md5(dataStoreDirectory.path).hexdigest()
socketDir = "/tmp/ccs_postgres_" + digest
-
+
self.socketDir = CachingFilePath(socketDir)
self.host = self.socketDir.path
self.port = None
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/util.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/base/datastore/util.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -98,6 +98,12 @@
return "objectWithResourceID:%s:%s" % (homeResourceID, resourceID)
+ # Home child objects by external id
+
+ def keyForObjectWithExternalID(self, homeResourceID, externalID):
+ return "objectWithExternalID:%s:%s" % (homeResourceID, externalID)
+
+
# Home metadata (Created/Modified)
def keyForHomeMetaData(self, homeResourceID):
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/scheduling/ischedule/localservers.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/scheduling/ischedule/localservers.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/scheduling/ischedule/localservers.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -84,6 +84,12 @@
self._thisServer = None
+ def addServer(self, server):
+ self._servers[server.id] = server
+ if server.thisServer:
+ self._thisServer = server
+
+
def getServerById(self, id):
return self._servers.get(id)
@@ -125,16 +131,22 @@
Represents a server.
"""
- def __init__(self):
- self.id = None
- self.uri = None
- self.thisServer = False
+ def __init__(self, id=None, uri=None, sharedSecret=None, thisServer=False):
+ self.id = id
+ self.uri = uri
+ self.thisServer = thisServer
self.ips = set()
self.allowed_from_ips = set()
- self.shared_secret = None
+ self.shared_secret = sharedSecret
self.isImplicit = True
+ def details(self):
+ if not hasattr(self, "ssl"):
+ self._parseDetails()
+ return (self.ssl, self.host, self.port, self.path,)
+
+
def check(self, ignoreIPLookupFailures=False):
# Check whether this matches the current server
parsed_uri = urlparse.urlparse(self.uri)
@@ -215,7 +227,28 @@
return (SERVER_SECRET_HEADER, self.shared_secret,)
+ def _parseDetails(self):
+ # Extract scheme, host, port and path
+ if self.uri.startswith("http://"):
+ self.ssl = False
+ rest = self.uri[7:]
+ elif self.uri.startswith("https://"):
+ self.ssl = True
+ rest = self.uri[8:]
+ splits = rest.split("/", 1)
+ hostport = splits[0].split(":")
+ self.host = hostport[0]
+ if len(hostport) > 1:
+ self.port = int(hostport[1])
+ else:
+ self.port = {False: 80, True: 443}[self.ssl]
+ self.path = "/"
+ if len(splits) > 1:
+ self.path += splits[1]
+
+
+
ELEMENT_SERVERS = "servers"
ELEMENT_SERVER = "server"
ELEMENT_ID = "id"
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/sql.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/datastore/sql.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -75,7 +75,7 @@
InvalidAttachmentOperation, DuplicatePrivateCommentsError
from txdav.caldav.icalendarstore import QuotaExceeded
from txdav.common.datastore.sql import CommonHome, CommonHomeChild, \
- CommonObjectResource, ECALENDARTYPE
+ CommonObjectResource, ECALENDARTYPE, CommonHomeExternal
from txdav.common.datastore.sql_legacy import PostgresLegacyIndexEmulator, \
PostgresLegacyInboxIndexEmulator
from txdav.common.datastore.sql_tables import _ATTACHMENTS_MODE_NONE, \
@@ -433,6 +433,7 @@
def __init__(self, transaction, ownerUID):
+ self._externalClass = CalendarHomeExternal
self._childClass = Calendar
super(CalendarHome, self).__init__(transaction, ownerUID)
@@ -940,6 +941,127 @@
+class CalendarHomeExternal(CommonHomeExternal, CalendarHome):
+
+ def __init__(self, transaction, ownerUID, resourceID):
+
+ CalendarHome.__init__(self, transaction, ownerUID)
+ CommonHomeExternal.__init__(self, transaction, ownerUID, resourceID)
+
+
+ def hasCalendarResourceUIDSomewhereElse(self, uid, ok_object, mode):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def getCalendarResourcesForUID(self, uid):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def calendarObjectWithDropboxID(self, dropboxID):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def getAllDropboxIDs(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def getAllAttachmentNames(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def getAllManagedIDs(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def createdHome(self):
+ """
+ No children - make this a no-op.
+ """
+ return succeed(None)
+
+
+ def splitCalendars(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def ensureDefaultCalendarsExist(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def setDefaultCalendar(self, calendar, componentType):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def defaultCalendar(self, componentType, create=True):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def isDefaultCalendar(self, calendar):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def getDefaultAlarm(self, vevent, timed):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def setDefaultAlarm(self, alarm, vevent, timed):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def getAvailability(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def setAvailability(self, availability):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+
class Calendar(CommonHomeChild):
"""
SQL-based implementation of L{ICalendar}.
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/icalendardirectoryservice.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/icalendardirectoryservice.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/caldav/icalendardirectoryservice.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -60,14 +60,6 @@
@rtype: C{str}
"""
- def thisServer(): #@NoSelf
- """
- Indicates whether the record is hosted on this server "pod".
-
- @return: C{True} if hosted by this service.
- @rtype: C{bool}
- """
-
def calendarsEnabled(): #@NoSelf
"""
Indicates whether the record enabled for using the calendar service.
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/carddav/datastore/sql.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/carddav/datastore/sql.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -430,6 +430,8 @@
"""
implements(IAddressBook)
+ _homeType = EADDRESSBOOKTYPE
+
# structured tables. (new, preferred)
_homeSchema = schema.ADDRESSBOOK_HOME
_bindSchema = schema.SHARED_ADDRESSBOOK_BIND
@@ -705,7 +707,7 @@
home._txn, homeID=home._resourceID
)
for groupRow in groupRows:
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
ownerHome = yield home._txn.homeWithResourceID(home._homeType, ownerAddressBookID, create=True)
names |= set([ownerHome.uid()])
@@ -733,7 +735,7 @@
)
# get ownerHomeIDs
for dataRow in dataRows:
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
ownerHome = yield home.ownerHomeWithChildID(resourceID)
ownerHomeToDataRowMap[ownerHome] = dataRow
@@ -742,7 +744,7 @@
home._txn, homeID=home._resourceID
)
for groupBindRow in groupBindRows:
- bindMode, homeID, resourceID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
ownerAddressBookID = yield AddressBookObject.ownerAddressBookIDFromGroupID(home._txn, resourceID)
ownerHome = yield home.ownerHomeWithChildID(ownerAddressBookID)
if ownerHome not in ownerHomeToDataRowMap:
@@ -765,7 +767,7 @@
# Create the actual objects merging in properties
for ownerHome, dataRow in ownerHomeToDataRowMap.iteritems():
- bindMode, homeID, resourceID, name, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
additionalBind = dataRow[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
metadata = dataRow[cls.bindColumnCount + len(cls.additionalBindColumns()):]
@@ -888,7 +890,7 @@
overallBindStatus = _BIND_STATUS_INVITED
minBindRevision = None
for row in rows:
- bindMode, homeID, resourceGroupID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceGroupID, externalID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
if groupID is None:
groupID = resourceGroupID
minBindRevision = min(minBindRevision, bindRevision) if minBindRevision is not None else bindRevision
@@ -995,7 +997,7 @@
readWriteGroupIDs = []
readOnlyGroupIDs = []
for groupBindRow in groupBindRows:
- bindMode, homeID, resourceID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
if bindMode == _BIND_MODE_WRITE:
readWriteGroupIDs.append(resourceID)
else:
@@ -1056,7 +1058,7 @@
readWriteGroupIDs = []
readOnlyGroupIDs = []
for groupBindRow in groupBindRows:
- bindMode, homeID, resourceID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
if bindMode == _BIND_MODE_WRITE:
readWriteGroupIDs.append(resourceID)
else:
@@ -1594,7 +1596,7 @@
)
if groupBindRows:
groupBindRow = groupBindRows[0]
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
if accepted is not None and (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
returnValue(None)
@@ -1677,7 +1679,7 @@
if groupBindRows:
groupBindRow = groupBindRows[0]
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = groupBindRow[:AddressBookObject.bindColumnCount] #@UnusedVariable
self._bindMode = bindMode
self._bindStatus = bindStatus
self._bindMessage = bindMessage
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/__init__.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/__init__.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/__init__.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,15 @@
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/conduit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/conduit.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/conduit.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,341 @@
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from txdav.common.datastore.podding.request import ConduitRequest
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+from txdav.common.icommondatastore import ExternalShareFailed
+
+
+__all__ = [
+ "PoddingConduitResource",
+]
+
+class BadMessageError(Exception):
+ pass
+
+
+
+class InvalidCrossPodRequestError(Exception):
+ pass
+
+
+
+class FailedCrossPodRequestError(Exception):
+ pass
+
+
+
+class PoddingConduit(object):
+ """
+ This class is the API/RPC bridge between cross-pod requests and the store.
+
+ Each cross-pod request/response is described by a Python C{dict} that is serialized
+ to JSON for the HTTP request/response.
+
+ Each request C{dict} has an "action" key that indicates what call is being made, and
+ the other keys are arguments to that call.
+
+ Each response C{dict} has a "result" key that indicates the call result, and other
+ optional keys for any parameters returned by the call.
+
+ The conduit provides two methods for each action: one for the sending side and one for
+ the receiving side, called "send_{action}" and "recv_{action}", respectively, where
+ {action} is the action value.
+
+ The "send_{action}" calls each have a set of arguments specific to the call itself. The
+ code takes care of packing that into a C{dict} and sending to the appropriate pod.
+
+ The "recv_{action}" calls take a single C{dict} argument that is the deserialized JSON
+ data from the incoming request. The return value is a C{dict} with the result.
+
+ Right now this conduit is used for cross-pod sharing operations. In the future we will
+ likely use it for cross-pod migration.
+ """
+
+ def __init__(self, store):
+ """
+ @param store: the L{CommonDataStore} in use.
+ """
+ self.store = store
+
+
+ def validRequst(self, source_guid, destination_guid):
+ """
+ Verify that the specified GUIDs are valid for the request and return the
+ matching directory records.
+
+ @param source_guid: GUID for the user on whose behalf the request is being made
+ @type source_guid: C{str}
+ @param destination_guid: GUID for the user to whom the request is being sent
+ @type destination_guid: C{str}
+
+ @return: C{tuple} of L{IStoreDirectoryRecord}
+ """
+
+ source = self.store.directoryService().recordWithUID(source_guid)
+ if source is None:
+ raise DirectoryRecordNotFoundError("Cross-pod source: {}".format(source_guid))
+ if not source.thisServer():
+ raise InvalidCrossPodRequestError("Cross-pod source not on this server: {}".format(source_guid))
+
+ destination = self.store.directoryService().recordWithUID(destination_guid)
+ if destination is None:
+ raise DirectoryRecordNotFoundError("Cross-pod destination: {}".format(destination_guid))
+ if destination.thisServer():
+ raise InvalidCrossPodRequestError("Cross-pod destination on this server: {}".format(destination_guid))
+
+ return (source, destination,)
+
+
+ @inlineCallbacks
+ def send_shareinvite(self, txn, homeType, ownerUID, ownerID, ownerName, shareeUID, shareUID, bindMode, summary, supported_components):
+ """
+ Send a sharing invite cross-pod message.
+
+ @param homeType: Type of home being shared.
+ @type homeType: C{int}
+ @param ownerUID: GUID of the sharer.
+ @type ownerUID: C{str}
+ @param ownerID: resource ID of the sharer calendar
+ @type ownerID: C{int}
+ @param ownerName: owner's name of the sharer calendar
+ @type ownerName: C{str}
+ @param shareeUID: GUID of the sharee
+ @type shareeUID: C{str}
+ @param shareUID: Resource/invite ID for sharee
+ @type shareUID: C{str}
+ @param bindMode: bind mode for the share
+ @type bindMode: C{str}
+ @param summary: sharing message
+ @type summary: C{str}
+        @param supported_components: supported components, may be C{None}
+ @type supported_components: C{str}
+ """
+
+ _ignore_owner, sharee = self.validRequst(ownerUID, shareeUID)
+
+ action = {
+ "action": "shareinvite",
+ "type": homeType,
+ "owner": ownerUID,
+ "owner_id": ownerID,
+ "owner_name": ownerName,
+ "sharee": shareeUID,
+ "share_id": shareUID,
+ "mode": bindMode,
+ "summary": summary,
+ }
+ if supported_components is not None:
+ action["supported-components"] = supported_components
+
+ request = ConduitRequest(sharee.server(), action)
+ response = (yield request.doRequest(txn))
+ if response["result"] != "ok":
+ raise FailedCrossPodRequestError(response["description"])
+
+
+ @inlineCallbacks
+ def recv_shareinvite(self, txn, message):
+ """
+ Process a sharing invite cross-pod message. Message arguments as per L{send_shareinvite}.
+
+ @param message: message arguments
+ @type message: C{dict}
+ """
+
+ if message["action"] != "shareinvite":
+ raise BadMessageError("Wrong action '{}' for recv_shareinvite".format(message["action"]))
+
+ # Create a share
+ shareeHome = yield txn.homeWithUID(message["type"], message["sharee"], create=True)
+ if shareeHome is None or shareeHome.external():
+ returnValue({
+ "result": "bad",
+ "description": "Invalid sharee UID specified",
+ })
+
+ try:
+ yield shareeHome.processExternalInvite(
+ message["owner"],
+ message["owner_id"],
+ message["owner_name"],
+ message["share_id"],
+ message["mode"],
+ message["summary"],
+ supported_components=message.get("supported-components")
+ )
+ except ExternalShareFailed as e:
+ returnValue({
+ "result": "bad",
+ "description": str(e),
+ })
+
+ returnValue({
+ "result": "ok",
+ "description": "Success"
+ })
+
+
+ @inlineCallbacks
+ def send_shareuninvite(self, txn, homeType, ownerUID, ownerID, shareeUID, shareUID):
+ """
+ Send a sharing uninvite cross-pod message.
+
+ @param homeType: Type of home being shared.
+ @type homeType: C{int}
+ @param ownerUID: GUID of the sharer.
+ @type ownerUID: C{str}
+ @param ownerID: resource ID of the sharer calendar
+ @type ownerID: C{int}
+ @param shareeUID: GUID of the sharee
+ @type shareeUID: C{str}
+ @param shareUID: Resource/invite ID for sharee
+ @type shareUID: C{str}
+ """
+
+ _ignore_owner, sharee = self.validRequst(ownerUID, shareeUID)
+
+ action = {
+ "action": "shareuninvite",
+ "type": homeType,
+ "owner": ownerUID,
+ "owner_id": ownerID,
+ "sharee": shareeUID,
+ "share_id": shareUID,
+ }
+
+ request = ConduitRequest(sharee.server(), action)
+ response = (yield request.doRequest(txn))
+ if response["result"] != "ok":
+ raise FailedCrossPodRequestError(response["description"])
+
+
+ @inlineCallbacks
+ def recv_shareuninvite(self, txn, message):
+ """
+ Process a sharing uninvite cross-pod message. Message arguments as per L{send_shareuninvite}.
+
+ @param message: message arguments
+ @type message: C{dict}
+ """
+
+ if message["action"] != "shareuninvite":
+ raise BadMessageError("Wrong action '{}' for recv_shareuninvite".format(message["action"]))
+
+        # Remove the share from the sharee's home
+ shareeHome = yield txn.homeWithUID(message["type"], message["sharee"], create=True)
+ if shareeHome is None or shareeHome.external():
+ returnValue({
+ "result": "bad",
+ "description": "Invalid sharee UID specified",
+ })
+
+ try:
+ yield shareeHome.processExternalUninvite(
+ message["owner"],
+ message["owner_id"],
+ message["share_id"],
+ )
+ except ExternalShareFailed as e:
+ returnValue({
+ "result": "bad",
+ "description": str(e),
+ })
+
+ returnValue({
+ "result": "ok",
+ "description": "Success"
+ })
+
+
+ @inlineCallbacks
+ def send_sharereply(self, txn, homeType, ownerUID, shareeUID, shareUID, bindStatus, summary=None):
+ """
+ Send a sharing reply cross-pod message.
+
+ @param homeType: Type of home being shared.
+ @type homeType: C{int}
+ @param ownerUID: GUID of the sharer.
+ @type ownerUID: C{str}
+ @param shareeUID: GUID of the sharee
+ @type shareeUID: C{str}
+ @param shareUID: Resource/invite ID for sharee
+ @type shareUID: C{str}
+ @param bindStatus: bind mode for the share
+ @type bindStatus: C{str}
+ @param summary: sharing message
+ @type summary: C{str}
+ """
+
+ _ignore_owner, sharee = self.validRequst(shareeUID, ownerUID)
+
+ action = {
+ "action": "sharereply",
+ "type": homeType,
+ "owner": ownerUID,
+ "sharee": shareeUID,
+ "share_id": shareUID,
+ "status": bindStatus,
+ }
+ if summary is not None:
+ action["summary"] = summary
+
+ request = ConduitRequest(sharee.server(), action)
+ response = (yield request.doRequest(txn))
+ if response["result"] != "ok":
+ raise FailedCrossPodRequestError(response["description"])
+
+
+ @inlineCallbacks
+ def recv_sharereply(self, txn, message):
+ """
+ Process a sharing reply cross-pod message. Message arguments as per L{send_sharereply}.
+
+ @param message: message arguments
+ @type message: C{dict}
+ """
+
+ if message["action"] != "sharereply":
+ raise BadMessageError("Wrong action '{}' for recv_sharereply".format(message["action"]))
+
+        # Apply the sharee's reply on the owner's home
+ ownerHome = yield txn.homeWithUID(message["type"], message["owner"])
+ if ownerHome is None or ownerHome.external():
+ returnValue({
+ "result": "bad",
+ "description": "Invalid owner UID specified",
+ })
+
+ try:
+ yield ownerHome.processExternalReply(
+ message["owner"],
+ message["sharee"],
+ message["share_id"],
+ message["status"],
+ summary=message.get("summary")
+ )
+ except ExternalShareFailed as e:
+ returnValue({
+ "result": "bad",
+ "description": str(e),
+ })
+
+ returnValue({
+ "result": "ok",
+ "description": "Success"
+ })
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/request.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/request.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/request.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,175 @@
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from calendarserver.version import version
+
+from twext.internet.gaiendpoint import GAIEndpoint
+from twext.python.log import Logger
+from twext.web2 import responsecode
+from twext.web2.client.http import HTTPClientProtocol, ClientRequest
+from twext.web2.dav.util import allDataFromStream
+from twext.web2.http_headers import Headers, MimeType
+from twext.web2.stream import MemoryStream
+
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.protocol import Factory
+
+from twistedcaldav.accounting import accountingEnabledForCategory, \
+ emitAccounting
+from twistedcaldav.client.pool import _configuredClientContextFactory
+from twistedcaldav.config import config
+from twistedcaldav.util import utf8String
+
+from cStringIO import StringIO
+import json
+
+
+log = Logger()
+
+
+
+class ConduitRequest(object):
+
+ def __init__(self, server, data):
+
+ self.server = server
+ self.data = json.dumps(data)
+
+
+ @inlineCallbacks
+ def doRequest(self, txn):
+
+ # Generate an HTTP client request
+ try:
+ if "xpod" not in txn.logItems:
+ txn.logItems["xpod"] = 0
+ txn.logItems["xpod"] += 1
+
+ response = (yield self._processRequest())
+
+ if accountingEnabledForCategory("xPod"):
+ self.loggedResponse = yield self.logResponse(response)
+ emitAccounting("xPod", "", self.loggedRequest + "\n" + self.loggedResponse, "POST")
+
+ if response.code in (responsecode.OK,):
+ data = (yield allDataFromStream(response.stream))
+ data = json.loads(data)
+ else:
+ raise ValueError("Incorrect cross-pod response status code: {}".format(response.code))
+
+ except Exception as e:
+ # Request failed
+ log.error("Could not do cross-pod request : {request} {ex}", request=self, ex=e)
+ raise ValueError("Failed cross-pod request: {}".format(response.code))
+
+ returnValue(data)
+
+
+ @inlineCallbacks
+ def logRequest(self, request):
+ """
+ Log an HTTP request.
+ """
+
+ iostr = StringIO()
+ iostr.write(">>>> Request start\n\n")
+ if hasattr(request, "clientproto"):
+ protocol = "HTTP/{:d}.{:d}".format(request.clientproto[0], request.clientproto[1])
+ else:
+ protocol = "HTTP/1.1"
+ iostr.write("{} {} {}\n".format(request.method, request.uri, protocol))
+ for name, valuelist in request.headers.getAllRawHeaders():
+ for value in valuelist:
+ # Do not log authorization details
+ if name not in ("Authorization",):
+ iostr.write("{}: {}\n".format(name, value))
+ else:
+ iostr.write("{}: xxxxxxxxx\n".format(name))
+ iostr.write("\n")
+
+ # We need to play a trick with the request stream as we can only read it once. So we
+ # read it, store the value in a MemoryStream, and replace the request's stream with that,
+ # so the data can be read again.
+ data = (yield allDataFromStream(request.stream))
+ iostr.write(data)
+ request.stream = MemoryStream(data if data is not None else "")
+ request.stream.doStartReading = None
+
+ iostr.write("\n\n>>>> Request end\n")
+ returnValue(iostr.getvalue())
+
+
+ @inlineCallbacks
+ def logResponse(self, response):
+ """
+        Log an HTTP response.
+ """
+ iostr = StringIO()
+ iostr.write(">>>> Response start\n\n")
+ code_message = responsecode.RESPONSES.get(response.code, "Unknown Status")
+ iostr.write("HTTP/1.1 {:d} {}\n".format(response.code, code_message))
+ for name, valuelist in response.headers.getAllRawHeaders():
+ for value in valuelist:
+ # Do not log authorization details
+ if name not in ("WWW-Authenticate",):
+ iostr.write("{}: {}\n".format(name, value))
+ else:
+ iostr.write("{}: xxxxxxxxx\n".format(name))
+ iostr.write("\n")
+
+ # We need to play a trick with the response stream to ensure we don't mess it up. So we
+ # read it, store the value in a MemoryStream, and replace the response's stream with that,
+ # so the data can be read again.
+ data = (yield allDataFromStream(response.stream))
+ iostr.write(data)
+ response.stream = MemoryStream(data if data is not None else "")
+ response.stream.doStartReading = None
+
+ iostr.write("\n\n>>>> Response end\n")
+ returnValue(iostr.getvalue())
+
+
+ @inlineCallbacks
+ def _processRequest(self):
+ """
+ Process the request by sending it to the relevant server.
+
+ @return: the HTTP response.
+ @rtype: L{Response}
+ """
+ ssl, host, port, _ignore_path = self.server.details()
+ path = "/" + config.Servers.ConduitName
+
+ headers = Headers()
+ headers.setHeader("Host", utf8String(host + ":{}".format(port)))
+ headers.setHeader("Content-Type", MimeType("application", "json", params={"charset": "utf-8", }))
+ headers.setHeader("User-Agent", "CalendarServer/{}".format(version))
+ headers.addRawHeader(*self.server.secretHeader())
+
+ from twisted.internet import reactor
+ f = Factory()
+ f.protocol = HTTPClientProtocol
+ ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory() if ssl else None)
+ proto = (yield ep.connect(f))
+
+ request = ClientRequest("POST", path, headers, self.data)
+
+ if accountingEnabledForCategory("xPod"):
+ self.loggedRequest = yield self.logRequest(request)
+
+ response = (yield proto.submitRequest(request))
+
+ returnValue(response)
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/resource.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/resource.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,195 @@
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+import json
+
+from twext.web2 import responsecode
+from twext.web2.dav.noneprops import NonePropertyStore
+from twext.web2.dav.util import allDataFromStream
+from twext.web2.http import Response, HTTPError, StatusResponse, JSONResponse
+from twext.web2.http_headers import MimeType
+
+from twisted.internet.defer import succeed, returnValue, inlineCallbacks
+
+from twistedcaldav.extensions import DAVResource, \
+ DAVResourceWithoutChildrenMixin
+from twistedcaldav.resource import ReadOnlyNoCopyResourceMixIn
+from twistedcaldav.scheduling_store.caldav.resource import \
+ deliverSchedulePrivilegeSet
+
+from txdav.xml import element as davxml
+from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers
+
+__all__ = [
+ "ConduitResource",
+]
+
+class ConduitResource(ReadOnlyNoCopyResourceMixIn, DAVResourceWithoutChildrenMixin, DAVResource):
+ """
+ Podding cross-pod RPC conduit resource.
+
+ Extends L{DAVResource} to provide cross-pod RPC functionality.
+ """
+
+ def __init__(self, parent, store):
+ """
+ @param parent: the parent resource of this one.
+ """
+ assert parent is not None
+
+ DAVResource.__init__(self, principalCollections=parent.principalCollections())
+
+ self.parent = parent
+ self.store = store
+
+
+ def deadProperties(self):
+ if not hasattr(self, "_dead_properties"):
+ self._dead_properties = NonePropertyStore(self)
+ return self._dead_properties
+
+
+ def etag(self):
+ return succeed(None)
+
+
+ def checkPreconditions(self, request):
+ return None
+
+
+ def resourceType(self):
+ return davxml.ResourceType.ischeduleinbox
+
+
+ def contentType(self):
+ return MimeType.fromString("text/html; charset=utf-8")
+
+
+ def isCollection(self):
+ return False
+
+
+ def isCalendarCollection(self):
+ return False
+
+
+ def isPseudoCalendarCollection(self):
+ return False
+
+
+ def principalForCalendarUserAddress(self, address):
+ for principalCollection in self.principalCollections():
+ principal = principalCollection.principalForCalendarUserAddress(address)
+ if principal is not None:
+ return principal
+ return None
+
+
+ def render(self, request):
+ output = """<html>
+<head>
+<title>Podding Conduit Resource</title>
+</head>
+<body>
+<h1>Podding Conduit Resource.</h1>
+</body>
+</html>"""
+
+ response = Response(200, {}, output)
+ response.headers.setHeader("content-type", MimeType("text", "html"))
+ return response
+
+
+ @inlineCallbacks
+ def http_POST(self, request):
+ """
+ The server-to-server POST method.
+ """
+
+ # Check shared secret
+ if not Servers.getThisServer().checkSharedSecret(request.headers):
+ self.log.error("Invalid shared secret header in cross-pod request")
+ raise HTTPError(StatusResponse(responsecode.FORBIDDEN, "Not authorized to make this request"))
+
+ # Check content first
+ contentType = request.headers.getHeader("content-type")
+
+ if "{}/{}".format(contentType.mediaType, contentType.mediaSubtype) != "application/json":
+ self.log.error("MIME type {mime} not allowed in request", mime=contentType)
+ raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "MIME type {} not allowed in request".format(contentType)))
+
+ body = (yield allDataFromStream(request.stream))
+ try:
+ j = json.loads(body)
+ except ValueError as e:
+ self.log.error("Invalid JSON data in request: {ex}\n{body}", ex=e, body=body)
+ raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Invalid JSON data in request: {}\n{}".format(e, body)))
+
+ # Must have a dict with an "action" key
+ try:
+ action = j["action"]
+ except (KeyError, TypeError) as e:
+ self.log.error("JSON data must have an object as its root with an 'action' attribute: {ex}\n{json}", ex=e, json=j)
+ raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "JSON data must have an object as its root with an 'action' attribute: {}\n{}".format(e, j,)))
+
+ if action == "ping":
+ result = {"result": "ok"}
+ response = JSONResponse(responsecode.OK, result)
+ returnValue(response)
+
+ method = "recv_{}".format(action)
+ if not hasattr(self.store.conduit, method):
+ self.log.error("Unsupported action: {action}", action=action)
+ raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Unsupported action: {}".format(action)))
+
+ # Need a transaction to work with
+ txn = self.store.newTransaction(repr(request))
+
+ # Do the POST processing treating this as a non-local schedule
+ try:
+ result = (yield getattr(self.store.conduit, method)(txn, j))
+ except Exception as e:
+ yield txn.abort()
+ self.log.error("Failed action: {action}, {ex}", action=action, ex=e)
+ raise HTTPError(StatusResponse(responsecode.INTERNAL_SERVER_ERROR, "Failed action: {}, {}".format(action, e)))
+
+ yield txn.commit()
+
+ response = JSONResponse(responsecode.OK, result)
+ returnValue(response)
+
+
+ ##
+ # ACL
+ ##
+
+ def supportedPrivileges(self, request):
+ return succeed(deliverSchedulePrivilegeSet)
+
+
+ def defaultAccessControlList(self):
+ privs = (
+ davxml.Privilege(davxml.Read()),
+ )
+
+ return davxml.ACL(
+ # DAV:Read for all principals (includes anonymous)
+ davxml.ACE(
+ davxml.Principal(davxml.All()),
+ davxml.Grant(*privs),
+ davxml.Protected(),
+ ),
+ )
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/__init__.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/__init__.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/__init__.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,15 @@
+##
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_conduit.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_conduit.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_conduit.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,116 @@
+##
+# Copyright (c) 2005-2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.python.clsprop import classproperty
+import twext.web2.dav.test.util
+from twisted.internet.defer import inlineCallbacks, succeed
+from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers, Server
+from txdav.caldav.datastore.test.util import buildCalendarStore, \
+ TestCalendarStoreDirectoryRecord
+from txdav.common.datastore.podding.resource import ConduitResource
+from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
+from txdav.common.datastore.podding.conduit import PoddingConduit, \
+ InvalidCrossPodRequestError
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+
+class Conduit (CommonCommonTests, twext.web2.dav.test.util.TestCase):
+
+ class FakeConduit(object):
+
+ def recv_fake(self, j):
+ return succeed({
+ "result": "ok",
+ "back2u": j["echo"],
+ "more": "bits",
+ })
+
+
+ @inlineCallbacks
+ def setUp(self):
+ yield super(Conduit, self).setUp()
+ self._sqlCalendarStore = yield buildCalendarStore(self, self.notifierFactory)
+ self.directory = self._sqlCalendarStore.directoryService()
+
+ for ctr in range(1, 100):
+ self.directory.addRecord(TestCalendarStoreDirectoryRecord(
+ "puser{:02d}".format(ctr),
+ ("puser{:02d}".format(ctr),),
+ "Puser {:02d}".format(ctr),
+ frozenset((
+ "urn:uuid:puser{:02d}".format(ctr),
+ "mailto:puser{:02d}@example.com".format(ctr),
+ )),
+ thisServer=False,
+ ))
+
+ self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
+
+ self.thisServer = Server("A", "http://127.0.0.1", "A", True)
+ Servers.addServer(self.thisServer)
+
+ yield self.populate()
+
+
+ def storeUnderTest(self):
+ """
+ Return a store for testing.
+ """
+ return self._sqlCalendarStore
+
+
+ @inlineCallbacks
+ def populate(self):
+ yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
+ self.notifierFactory.reset()
+
+
+ @classproperty(cache=False)
+ def requirements(cls): #@NoSelf
+ return {
+ "user01": {
+ "calendar_1": {
+ },
+ "inbox": {
+ },
+ },
+ "user02": {
+ "calendar_1": {
+ },
+ "inbox": {
+ },
+ },
+ "user03": {
+ "calendar_1": {
+ },
+ "inbox": {
+ },
+ },
+ }
+
+
+ def test_validRequst(self):
+ """
+        Cross-pod GUID validation: valid sharer/sharee pairs return directory records; unknown or wrong-pod users raise.
+ """
+
+ conduit = PoddingConduit(self.storeUnderTest())
+ r1, r2 = conduit.validRequst("user01", "puser02")
+ self.assertTrue(r1 is not None)
+ self.assertTrue(r2 is not None)
+
+ self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "bogus01", "user02")
+ self.assertRaises(DirectoryRecordNotFoundError, conduit.validRequst, "user01", "bogus02")
+ self.assertRaises(InvalidCrossPodRequestError, conduit.validRequst, "user01", "user02")
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_external_home.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_external_home.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_external_home.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,98 @@
+##
+# Copyright (c) 2005-2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twisted.internet.defer import inlineCallbacks
+
+from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers, \
+ Server
+from txdav.caldav.datastore.test.util import buildCalendarStore, \
+ TestCalendarStoreDirectoryRecord
+from txdav.common.datastore.podding.resource import ConduitResource
+from txdav.common.datastore.sql_tables import _HOME_STATUS_NORMAL, \
+ _HOME_STATUS_EXTERNAL
+from txdav.common.datastore.test.util import CommonCommonTests
+from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
+
+import twext.web2.dav.test.util
+
+
+class ExternalHome(CommonCommonTests, twext.web2.dav.test.util.TestCase):
+
+ @inlineCallbacks
+ def setUp(self):
+ yield super(ExternalHome, self).setUp()
+ self._sqlCalendarStore = yield buildCalendarStore(self, self.notifierFactory)
+ self.directory = self._sqlCalendarStore.directoryService()
+
+ for ctr in range(1, 100):
+ self.directory.addRecord(TestCalendarStoreDirectoryRecord(
+ "puser{:02d}".format(ctr),
+ ("puser{:02d}".format(ctr),),
+ "Puser {:02d}".format(ctr),
+ frozenset((
+ "urn:uuid:puser{:02d}".format(ctr),
+ "mailto:puser{:02d}@example.com".format(ctr),
+ )),
+ thisServer=False,
+ ))
+
+ self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
+
+ self.thisServer = Server("A", "http://127.0.0.1", "A", True)
+ Servers.addServer(self.thisServer)
+
+
+ def storeUnderTest(self):
+ """
+ Return a store for testing.
+ """
+ return self._sqlCalendarStore
+
+
+ @inlineCallbacks
+ def test_validNormalHome(self):
+ """
+ Locally hosted homes are valid.
+ """
+
+ for i in range(1, 100):
+ home = yield self.transactionUnderTest().calendarHomeWithUID("user{:02d}".format(i), create=True)
+ self.assertTrue(home is not None)
+ self.assertEqual(home._status, _HOME_STATUS_NORMAL)
+ calendar = yield home.childWithName("calendar")
+ self.assertTrue(calendar is not None)
+
+
+ @inlineCallbacks
+ def test_validExternalHome(self):
+ """
+ Externally hosted homes are valid.
+ """
+
+ for i in range(1, 100):
+ home = yield self.transactionUnderTest().calendarHomeWithUID("puser{:02d}".format(i), create=True)
+ self.assertTrue(home is not None)
+ self.assertEqual(home._status, _HOME_STATUS_EXTERNAL)
+ self.assertRaises(AssertionError, home.childWithName, "calendar")
+
+
+ @inlineCallbacks
+ def test_invalidHome(self):
+ """
+ Home creation fails for a UID that has no directory record.
+ """
+
+ yield self.assertFailure(self.transactionUnderTest().calendarHomeWithUID("buser01", create=True), DirectoryRecordNotFoundError)
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_resource.py (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/podding/test/test_resource.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,272 @@
+##
+# Copyright (c) 2005-2013 Apple Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from twext.python.clsprop import classproperty
+from twext.web2 import http_headers, responsecode
+import twext.web2.dav.test.util
+from twext.web2.dav.util import allDataFromStream
+from twext.web2.test.test_server import SimpleRequest
+from twisted.internet.defer import inlineCallbacks, succeed
+from txdav.caldav.datastore.scheduling.ischedule.localservers import Servers, Server
+from txdav.caldav.datastore.test.util import buildCalendarStore
+from txdav.common.datastore.podding.resource import ConduitResource
+from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
+import json
+
+class ConduitPOST (CommonCommonTests, twext.web2.dav.test.util.TestCase):
+
+ class FakeConduit(object):
+
+ def recv_fake(self, j):
+ return succeed({
+ "result": "ok",
+ "back2u": j["echo"],
+ "more": "bits",
+ })
+
+
+ @inlineCallbacks
+ def setUp(self):
+ yield super(ConduitPOST, self).setUp()
+ self._sqlCalendarStore = yield buildCalendarStore(self, self.notifierFactory)
+ self.directory = self._sqlCalendarStore.directoryService()
+
+ self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
+
+ self.thisServer = Server("A", "http://127.0.0.1", "A", True)
+ Servers.addServer(self.thisServer)
+
+ yield self.populate()
+
+
+ def storeUnderTest(self):
+ """
+ Return a store for testing.
+ """
+ return self._sqlCalendarStore
+
+
+ @inlineCallbacks
+ def populate(self):
+ yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
+ self.notifierFactory.reset()
+
+
+ @classproperty(cache=False)
+ def requirements(cls): #@NoSelf
+ return {
+ "user01": {
+ "calendar_1": {
+ },
+ "inbox": {
+ },
+ },
+ "user02": {
+ "calendar_1": {
+ },
+ "inbox": {
+ },
+ },
+ "user03": {
+ "calendar_1": {
+ },
+ "inbox": {
+ },
+ },
+ }
+
+
+ @inlineCallbacks
+ def test_receive_no_secret(self):
+ """
+ Cross-pod request fails when there is no shared secret header present.
+ """
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("text/plain",)
+ }),
+ content="""Hello, World!
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.FORBIDDEN)
+
+
+ @inlineCallbacks
+ def test_receive_wrong_mime(self):
+ """
+ Cross-pod request fails when Content-Type header is wrong.
+ """
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("text/plain",),
+ self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
+ }),
+ content="""Hello, World!
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.BAD_REQUEST)
+
+
+ @inlineCallbacks
+ def test_receive_invalid_json(self):
+ """
+ Cross-pod request fails when request data is not JSON.
+ """
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("application/json",),
+ self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
+ }),
+ content="""Hello, World!
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.BAD_REQUEST)
+
+
+ @inlineCallbacks
+ def test_receive_bad_json(self):
+ """
+ Cross-pod request fails when JSON data does not have an "action".
+ """
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("application/json",),
+ self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
+ }),
+ content="""
+{
+ "foo":"bar"
+}
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.BAD_REQUEST)
+
+
+ @inlineCallbacks
+ def test_receive_ping(self):
+ """
+ Cross-pod request works with the "ping" action.
+ """
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("application/json",),
+ self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
+ }),
+ content="""
+{
+ "action":"ping"
+}
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.OK)
+ data = (yield allDataFromStream(response.stream))
+ j = json.loads(data)
+ self.assertTrue("result" in j)
+ self.assertEqual(j["result"], "ok")
+
+
+ @inlineCallbacks
+ def test_receive_fake_conduit_no_action(self):
+ """
+ Cross-pod request fails when conduit does not support the action.
+ """
+
+ self.patch(self.storeUnderTest(), "conduit", self.FakeConduit())
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("application/json",),
+ self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
+ }),
+ content="""
+{
+ "action":"bogus",
+ "echo":"bravo"
+}
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.BAD_REQUEST)
+
+
+ @inlineCallbacks
+ def test_receive_fake_conduit(self):
+ """
+ Cross-pod request works when conduit does support the action.
+ """
+
+ self.patch(self.storeUnderTest(), "conduit", self.FakeConduit())
+
+ request = SimpleRequest(
+ self.site,
+ "POST",
+ "/conduit",
+ headers=http_headers.Headers(rawHeaders={
+ "Content-Type": ("application/json",),
+ self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
+ }),
+ content="""
+{
+ "action":"fake",
+ "echo":"bravo"
+}
+""".replace("\n", "\r\n")
+ )
+
+ response = (yield self.send(request))
+ self.assertEqual(response.code, responsecode.OK)
+ data = (yield allDataFromStream(response.stream))
+ j = json.loads(data)
+ self.assertTrue("result" in j)
+ self.assertEqual(j["result"], "ok")
+ self.assertTrue("back2u" in j)
+ self.assertEqual(j["back2u"], "bravo")
+ self.assertTrue("more" in j)
+ self.assertEqual(j["more"], "bits")
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -14,9 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
##
-from collections import namedtuple
-from txdav.xml import element
-from txdav.base.propertystore.base import PropertyName
"""
SQL data store.
@@ -57,31 +54,37 @@
from txdav.base.datastore.util import QueryCacher
from txdav.base.datastore.util import normalizeUUIDOrNot
+from txdav.base.propertystore.base import PropertyName
from txdav.base.propertystore.none import PropertyStore as NonePropertyStore
from txdav.base.propertystore.sql import PropertyStore
from txdav.caldav.icalendarstore import ICalendarTransaction, ICalendarStore
from txdav.carddav.iaddressbookstore import IAddressBookTransaction
from txdav.common.datastore.common import HomeChildBase
+from txdav.common.datastore.podding.conduit import PoddingConduit
from txdav.common.datastore.sql_tables import _BIND_MODE_OWN, \
_BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, _BIND_STATUS_INVALID, \
_BIND_STATUS_INVITED, _BIND_MODE_DIRECT, _BIND_STATUS_DELETED, \
- _BIND_MODE_INDIRECT
+ _BIND_MODE_INDIRECT, _HOME_STATUS_NORMAL, _HOME_STATUS_EXTERNAL
from txdav.common.datastore.sql_tables import schema, splitSQLString
-from txdav.common.icommondatastore import ConcurrentModification
+from txdav.common.icommondatastore import ConcurrentModification, \
+ RecordNotAllowedError, ExternalShareFailed
from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
ObjectResourceNameNotAllowedError, ObjectResourceNameAlreadyExistsError, \
NoSuchObjectResourceError, AllRetriesFailed, InvalidSubscriptionValues, \
InvalidIMIPTokenValues, TooManyObjectResourcesError
-from txdav.common.idirectoryservice import IStoreDirectoryService
+from txdav.common.idirectoryservice import IStoreDirectoryService, \
+ DirectoryRecordNotFoundError
from txdav.common.inotifications import INotificationCollection, \
INotificationObject
from txdav.idav import ChangeCategory
+from txdav.xml import element
from uuid import uuid4, UUID
from zope.interface import implements, directlyProvides
+from collections import namedtuple
import json
import sys
import time
@@ -188,6 +191,8 @@
else:
self.queryCacher = None
+ self.conduit = PoddingConduit(self)
+
# Always import these here to trigger proper "registration" of the calendar and address book
# home classes
__import__("txdav.caldav.datastore.sql")
@@ -1469,7 +1474,77 @@
returnValue(shareeView)
+ #
+ # External (cross-pod) sharing - entry point is the sharee's home collection.
+ #
+ @inlineCallbacks
+ def processExternalInvite(self, ownerUID, ownerRID, ownerName, shareUID, bindMode, summary, supported_components=None):
+ """
+ External invite received.
+ """
+ # Get the owner home - create external one if not present
+ ownerHome = yield self._txn.homeWithUID(self._homeType, ownerUID, create=True)
+ if ownerHome is None or not ownerHome.external():
+ raise ExternalShareFailed("Invalid owner UID: {}".format(ownerUID))
+
+ # Try to find owner calendar via its external id
+ ownerView = yield ownerHome.childWithExternalID(ownerRID)
+ if ownerView is None:
+ ownerView = yield ownerHome.createChildWithName(ownerName, externalID=ownerRID)
+ if supported_components is not None and hasattr(ownerView, "setSupportedComponents"):
+ yield ownerView.setSupportedComponents(supported_components)
+
+ # Now carry out the share operation
+ yield ownerView.inviteUserToShare(self.uid(), bindMode, summary, shareName=shareUID)
+
+
+ @inlineCallbacks
+ def processExternalUninvite(self, ownerUID, ownerRID, shareUID):
+ """
+ External uninvite received.
+ """
+
+ # Get the owner home
+ ownerHome = yield self._txn.homeWithUID(self._homeType, ownerUID)
+ if ownerHome is None or not ownerHome.external():
+ raise ExternalShareFailed("Invalid owner UID: {}".format(ownerUID))
+
+ # Try to find owner calendar via its external id
+ ownerView = yield ownerHome.childWithExternalID(ownerRID)
+ if ownerView is None:
+ raise ExternalShareFailed("Invalid share ID: {}".format(shareUID))
+
+ # Now carry out the share operation
+ yield ownerView.uninviteUserFromShare(self.uid())
+
+
+ @inlineCallbacks
+ def processExternalReply(self, ownerUID, shareeUID, shareUID, bindStatus, summary=None):
+ """
+ External share reply received.
+ """
+
+ # TODO: Make sure the shareeUID and shareUID match
+
+ # Get the sharee home - it must exist and be external
+ shareeHome = yield self._txn.homeWithUID(self._homeType, shareeUID)
+ if shareeHome is None or not shareeHome.external():
+ raise ExternalShareFailed("Invalid sharee UID: {}".format(shareeUID))
+
+ # Try to find the sharee view via its share UID
+ shareeView = yield shareeHome.anyObjectWithShareUID(shareUID)
+ if shareeView is None:
+ raise ExternalShareFailed("Invalid share UID: {}".format(shareUID))
+
+ # Now carry out the share operation
+ if bindStatus == _BIND_STATUS_ACCEPTED:
+ yield shareeHome.acceptShare(shareUID, summary)
+ elif bindStatus == _BIND_STATUS_DECLINED:
+ yield shareeHome.declineShare(shareUID)
+
+
+
class CommonHome(SharingHomeMixIn):
log = Logger()
@@ -1477,6 +1552,7 @@
_homeType = None
_homeTable = None
_homeMetaDataTable = None
+ _externalClass = None
_childClass = None
_childTable = None
_notifierPrefix = None
@@ -1490,6 +1566,7 @@
self._txn = transaction
self._ownerUID = ownerUID
self._resourceID = None
+ self._status = _HOME_STATUS_NORMAL
self._dataVersion = None
self._childrenLoaded = False
self._children = {}
@@ -1553,6 +1630,7 @@
return (
cls._homeSchema.RESOURCE_ID,
cls._homeSchema.OWNER_UID,
+ cls._homeSchema.STATUS,
)
@@ -1567,6 +1645,7 @@
return (
"_resourceID",
"_ownerUID",
+ "_status",
)
@@ -1611,39 +1690,53 @@
"""
result = yield self._cacher.get(self._ownerUID)
if result is None:
- result = yield self._homeColumnsFromOwnerQuery.on(
- self._txn, ownerUID=self._ownerUID)
- if result and not no_cache:
- yield self._cacher.set(self._ownerUID, result)
+ result = yield self._homeColumnsFromOwnerQuery.on(self._txn, ownerUID=self._ownerUID)
+ if result:
+ result = result[0]
+ if not no_cache:
+ yield self._cacher.set(self._ownerUID, result)
if result:
- for attr, value in zip(self.homeAttributes(), result[0]):
+ for attr, value in zip(self.homeAttributes(), result):
setattr(self, attr, value)
- queryCacher = self._txn._queryCacher
- if queryCacher:
- # Get cached copy
- cacheKey = queryCacher.keyForHomeMetaData(self._resourceID)
- data = yield queryCacher.get(cacheKey)
+ # STOP! If the status is external we need to convert this object to a CommonHomeExternal class which will
+ # have the right behavior for non-hosted external users.
+ if self._status == _HOME_STATUS_EXTERNAL:
+ actualHome = self._externalClass(self._txn, self._ownerUID, self._resourceID)
else:
- data = None
- if data is None:
- # Don't have a cached copy
- data = (yield self._metaDataQuery.on(
- self._txn, resourceID=self._resourceID))[0]
- if queryCacher:
- # Cache the data
- yield queryCacher.setAfterCommit(self._txn, cacheKey, data)
+ actualHome = self
+ yield actualHome.initMetaDataFromStore()
+ yield actualHome._loadPropertyStore()
+ returnValue(actualHome)
+ else:
+ returnValue(None)
- for attr, value in zip(self.metadataAttributes(), data):
- setattr(self, attr, value)
- yield self._loadPropertyStore()
- returnValue(self)
+ @inlineCallbacks
+ def initMetaDataFromStore(self):
+ """
+ Load up the metadata and property store
+ """
+
+ queryCacher = self._txn._queryCacher
+ if queryCacher:
+ # Get cached copy
+ cacheKey = queryCacher.keyForHomeMetaData(self._resourceID)
+ data = yield queryCacher.get(cacheKey)
else:
- returnValue(None)
+ data = None
+ if data is None:
+ # Don't have a cached copy
+ data = (yield self._metaDataQuery.on(self._txn, resourceID=self._resourceID))[0]
+ if queryCacher:
+ # Cache the data
+ yield queryCacher.setAfterCommit(self._txn, cacheKey, data)
+ for attr, value in zip(self.metadataAttributes(), data):
+ setattr(self, attr, value)
+
@classmethod
@inlineCallbacks
def listHomes(cls, txn):
@@ -1673,6 +1766,13 @@
if not create:
returnValue(None)
+ # Determine if the user is local or external
+ record = txn.directoryService().recordWithUID(uid)
+ if record is None:
+ raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {uid}".format(uid=uid))
+
+ state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
+
# Use savepoint so we can do a partial rollback if there is a race condition
# where this row has already been inserted
savepoint = SavepointAction("homeWithUID")
@@ -1684,11 +1784,12 @@
resourceid = (yield Insert(
{
cls._homeSchema.OWNER_UID: uid,
+ cls._homeSchema.STATUS: state,
cls._homeSchema.DATAVERSION: cls._dataVersionValue,
},
- Return=cls._homeSchema.RESOURCE_ID).on(txn))[0][0]
- yield Insert(
- {cls._homeMetaDataSchema.RESOURCE_ID: resourceid}).on(txn)
+ Return=cls._homeSchema.RESOURCE_ID
+ ).on(txn))[0][0]
+ yield Insert({cls._homeMetaDataSchema.RESOURCE_ID: resourceid}).on(txn)
except Exception: # FIXME: Really want to trap the pg.DatabaseError but in a non-DB specific manner
yield savepoint.rollback(txn)
@@ -1726,7 +1827,7 @@
def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self._resourceID)
+ return "<%s: %s, %s>" % (self.__class__.__name__, self._resourceID, self._ownerUID)
def id(self):
@@ -1748,6 +1849,15 @@
return self._ownerUID
+ def external(self):
+ """
+ Is this an external home.
+
+ @return: C{False}, as this home is hosted on this pod.
+ """
+ return False
+
+
def transaction(self):
return self._txn
@@ -1865,6 +1975,17 @@
return self._childClass.objectWithID(self, resourceID)
+ def childWithExternalID(self, externalID):
+ """
+ Retrieve the child with the given C{externalID} contained in this
+ home.
+
+ @param externalID: an external resource ID (the resource ID on the hosting pod).
+ @return: an L{ICalendar} or C{None} if no such child exists.
+ """
+ return self._childClass.objectWithExternalID(self, externalID)
+
+
def allChildWithID(self, resourceID):
"""
Retrieve the child with the given C{resourceID} contained in this
@@ -1877,12 +1998,11 @@
@inlineCallbacks
- def createChildWithName(self, name):
+ def createChildWithName(self, name, externalID=None):
if name.startswith("."):
raise HomeChildNameNotAllowedError(name)
- yield self._childClass.create(self, name)
- child = (yield self.childWithName(name))
+ child = yield self._childClass.create(self, name, externalID=externalID)
returnValue(child)
@@ -2413,6 +2533,165 @@
+class CommonHomeExternal(CommonHome):
+ """
+ A CommonHome for a user not hosted on this system, but on another pod. This is needed to provide a
+ "reference" to the external user so we can share with them. Actual operations to list child resources, etc
+ are all stubbed out since no data for the user is actually hosted in this store.
+ """
+
+ def __init__(self, transaction, ownerUID, resourceID):
+ super(CommonHomeExternal, self).__init__(transaction, ownerUID)
+ self._resourceID = resourceID
+ self._status = _HOME_STATUS_EXTERNAL
+
+
+ def initFromStore(self, no_cache=False):
+ """
+ Never called - this should be done by CommonHome.initFromStore only.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def external(self):
+ """
+ Is this an external home.
+
+ @return: a string.
+ """
+ return True
+
+
+ def children(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def loadChildren(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def listChildren(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def objectWithShareUID(self, shareUID):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def invitedObjectWithShareUID(self, shareUID):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ @memoizedKey("name", "_children")
+ @inlineCallbacks
+ def createChildWithName(self, name, externalID=None):
+ """
+ No real children - only external ones.
+ """
+ if externalID is None:
+ raise AssertionError("CommonHomeExternal: not supported")
+ child = yield super(CommonHomeExternal, self).createChildWithName(name, externalID)
+ returnValue(child)
+
+
+ def removeChildWithName(self, name):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def syncToken(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def resourceNamesSinceRevision(self, revision, depth):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ @inlineCallbacks
+ def _loadPropertyStore(self):
+ """
+ Load the property store for this external home.
+ """
+ props = yield PropertyStore.load(
+ self.uid(),
+ self.uid(),
+ self._txn,
+ self._resourceID,
+ notifyCallback=self.notifyChanged
+ )
+ self._propertyStore = props
+
+
+ def properties(self):
+ return self._propertyStore
+
+
+ def objectResourcesWithUID(self, uid, ignore_children=[], allowShared=True):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def objectResourceWithID(self, rid):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+ def notifyChanged(self):
+ """
+ Notifications are not handled for external homes - make this a no-op.
+ """
+ return succeed(None)
+
+
+ def bumpModified(self):
+ """
+ No changes recorded for external homes - make this a no-op.
+ """
+ return succeed(None)
+
+
+ def removeUnacceptedShares(self):
+ """
+ No children.
+ """
+ raise AssertionError("CommonHomeExternal: not supported")
+
+
+# def ownerHomeAndChildNameForChildID(self, resourceID):
+# """
+# No children.
+# """
+# raise AssertionError("CommonHomeExternal: not supported")
+
+
+
class _SharedSyncLogic(object):
"""
Logic for maintaining sync-token shared between notification collections and
@@ -2791,6 +3070,7 @@
return Insert({
bind.HOME_RESOURCE_ID: Parameter("homeID"),
bind.RESOURCE_ID: Parameter("resourceID"),
+ bind.EXTERNAL_ID: Parameter("externalID"),
bind.RESOURCE_NAME: Parameter("name"),
bind.BIND_MODE: Parameter("mode"),
bind.BIND_STATUS: Parameter("bindStatus"),
@@ -2872,11 +3152,21 @@
"""
bind = cls._bindSchema
return cls._bindFor((bind.RESOURCE_ID == Parameter("resourceID"))
- .And(bind.HOME_RESOURCE_ID == Parameter("homeID"))
- )
+ .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
@classproperty
+ def _bindForExternalIDAndHomeID(cls): #@NoSelf
+ """
+ DAL query that looks up home bind rows by external
+ resource ID and home resource ID.
+ """
+ bind = cls._bindSchema
+ return cls._bindFor((bind.EXTERNAL_ID == Parameter("externalID"))
+ .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
+
+
+ @classproperty
def _bindForNameAndHomeID(cls): #@NoSelf
"""
DAL query that looks up any bind rows by home child
@@ -2884,15 +3174,14 @@
"""
bind = cls._bindSchema
return cls._bindFor((bind.RESOURCE_NAME == Parameter("name"))
- .And(bind.HOME_RESOURCE_ID == Parameter("homeID"))
- )
+ .And(bind.HOME_RESOURCE_ID == Parameter("homeID")))
#
# Higher level API
#
@inlineCallbacks
- def inviteUserToShare(self, shareeUID, mode, summary):
+ def inviteUserToShare(self, shareeUID, mode, summary, shareName=None):
"""
Invite a user to share this collection - either create the share if it does not exist, or
update the existing share with new values. Make sure a notification is sent as well.
@@ -2911,15 +3200,19 @@
status = _BIND_STATUS_INVITED if shareeView.shareStatus() in (_BIND_STATUS_DECLINED, _BIND_STATUS_INVALID) else None
yield self.updateShare(shareeView, mode=mode, status=status, summary=summary)
else:
- shareeView = yield self.createShare(shareeUID=shareeUID, mode=mode, summary=summary)
+ shareeView = yield self.createShare(shareeUID=shareeUID, mode=mode, summary=summary, shareName=shareName)
- # Send invite notification
- yield self._sendInviteNotification(shareeView)
+ # Check for external
+ if shareeView.viewerHome().external():
+ yield self._sendExternalInvite(shareeView)
+ else:
+ # Send invite notification
+ yield self._sendInviteNotification(shareeView)
returnValue(shareeView)
@inlineCallbacks
- def directShareWithUser(self, shareeUID):
+ def directShareWithUser(self, shareeUID, shareName=None):
"""
Create a direct share with the specified user. Note it is currently up to the app layer
to enforce access control - this is not ideal as we really should have control of that in
@@ -2934,7 +3227,7 @@
# Ignore if it already exists
shareeView = yield self.shareeView(shareeUID)
if shareeView is None:
- shareeView = yield self.createShare(shareeUID=shareeUID, mode=_BIND_MODE_DIRECT)
+ shareeView = yield self.createShare(shareeUID=shareeUID, mode=_BIND_MODE_DIRECT, shareName=shareName)
yield shareeView.newShare()
returnValue(shareeView)
@@ -2951,13 +3244,16 @@
shareeView = yield self.shareeView(shareeUID)
if shareeView is not None:
- # If current user state is accepted then we send an invite with the new state, otherwise
- # we cancel any existing invites for the user
- if not shareeView.direct():
- if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
- yield self._removeInviteNotification(shareeView)
- else:
- yield self._sendInviteNotification(shareeView, notificationState=_BIND_STATUS_DELETED)
+ if shareeView.viewerHome().external():
+ yield self._sendExternalUninvite(shareeView)
+ else:
+ # If current user state is accepted then we send an invite with the new state, otherwise
+ # we cancel any existing invites for the user
+ if not shareeView.direct():
+ if shareeView.shareStatus() != _BIND_STATUS_ACCEPTED:
+ yield self._removeInviteNotification(shareeView)
+ else:
+ yield self._sendInviteNotification(shareeView, notificationState=_BIND_STATUS_DELETED)
# Remove the bind
yield self.removeShare(shareeView)
@@ -2970,10 +3266,13 @@
"""
if not self.direct() and self.shareStatus() != _BIND_STATUS_ACCEPTED:
+ if self.external():
+ yield self._replyExternalInvite(_BIND_STATUS_ACCEPTED, summary)
ownerView = yield self.ownerView()
yield ownerView.updateShare(self, status=_BIND_STATUS_ACCEPTED)
yield self.newShare(displayname=summary)
- yield self._sendReplyNotification(ownerView, summary)
+ if not ownerView.external():
+ yield self._sendReplyNotification(ownerView, summary)
@inlineCallbacks
@@ -2983,9 +3282,12 @@
"""
if not self.direct() and self.shareStatus() != _BIND_STATUS_DECLINED:
+ if self.external():
+ yield self._replyExternalInvite(_BIND_STATUS_DECLINED)
ownerView = yield self.ownerView()
yield ownerView.updateShare(self, status=_BIND_STATUS_DECLINED)
- yield self._sendReplyNotification(ownerView)
+ if not ownerView.external():
+ yield self._sendReplyNotification(ownerView)
@inlineCallbacks
@@ -3100,10 +3402,56 @@
#
- # Lower level API
+ # External/cross-pod API
#
+ @inlineCallbacks
+ def _sendExternalInvite(self, shareeView):
+ yield self._txn.store().conduit.send_shareinvite(
+ self._txn,
+ shareeView.ownerHome()._homeType,
+ shareeView.ownerHome().uid(),
+ self.id(),
+ self.shareName(),
+ shareeView.viewerHome().uid(),
+ shareeView.shareUID(),
+ shareeView.shareMode(),
+ shareeView.shareMessage(),
+ supported_components=self.getSupportedComponents() if hasattr(self, "getSupportedComponents") else None,
+ )
+
+
@inlineCallbacks
+ def _sendExternalUninvite(self, shareeView):
+
+ yield self._txn.store().conduit.send_shareuninvite(
+ self._txn,
+ shareeView.ownerHome()._homeType,
+ shareeView.ownerHome().uid(),
+ self.id(),
+ shareeView.viewerHome().uid(),
+ shareeView.shareUID(),
+ )
+
+
+ @inlineCallbacks
+ def _replyExternalInvite(self, status, summary=None):
+
+ yield self._txn.store().conduit.send_sharereply(
+ self._txn,
+ self.viewerHome()._homeType,
+ self.ownerHome().uid(),
+ self.viewerHome().uid(),
+ self.shareName(),
+ status,
+ summary,
+ )
+
+
+ #
+ # Lower level API
+ #
+ @inlineCallbacks
def ownerView(self):
"""
Return the owner resource counterpart of this shared resource.
@@ -3126,7 +3474,7 @@
@inlineCallbacks
- def shareWith(self, shareeHome, mode, status=None, summary=None):
+ def shareWith(self, shareeHome, mode, status=None, summary=None, shareName=None):
"""
Share this (owned) L{CommonHomeChild} with another home.
@@ -3154,11 +3502,12 @@
@inlineCallbacks
def doInsert(subt):
- newName = self.newShareName()
+ newName = shareName if shareName is not None else self.newShareName()
yield self._bindInsertQuery.on(
subt,
homeID=shareeHome._resourceID,
resourceID=self._resourceID,
+ externalID=None,
name=newName,
mode=mode,
bindStatus=status,
@@ -3192,7 +3541,7 @@
@inlineCallbacks
- def createShare(self, shareeUID, mode, summary=None):
+ def createShare(self, shareeUID, mode, summary=None, shareName=None):
"""
Create a new shared resource. If the mode is direct, the share is created in accepted state,
otherwise the share is created in invited state.
@@ -3204,6 +3553,7 @@
mode=mode,
status=_BIND_STATUS_INVITED if mode != _BIND_MODE_DIRECT else _BIND_STATUS_ACCEPTED,
summary=summary,
+ shareName=shareName,
)
shareeView = yield self.shareeView(shareeUID)
returnValue(shareeView)
@@ -3267,12 +3617,7 @@
if summary is not None:
shareeView._bindMessage = columnMap[bind.MESSAGE]
- queryCacher = self._txn._queryCacher
- if queryCacher:
- cacheKey = queryCacher.keyForObjectWithName(shareeView._home._resourceID, shareeView._name)
- yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
- cacheKey = queryCacher.keyForObjectWithResourceID(shareeView._home._resourceID, shareeView._resourceID)
- yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+ yield shareeView.invalidateQueryCache()
# Must send notification to ensure cache invalidation occurs
yield self.notifyPropertyChanged()
@@ -3326,12 +3671,7 @@
homeID=shareeHome._resourceID,
)
- queryCacher = self._txn._queryCacher
- if queryCacher:
- cacheKey = queryCacher.keyForObjectWithName(shareeHome._resourceID, shareeView._name)
- yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
- cacheKey = queryCacher.keyForObjectWithResourceID(shareeHome._resourceID, shareeView._resourceID)
- yield queryCacher.invalidateAfterCommit(self._txn, cacheKey)
+ yield shareeView.invalidateQueryCache()
@inlineCallbacks
@@ -3574,13 +3914,14 @@
cls._bindSchema.BIND_MODE,
cls._bindSchema.HOME_RESOURCE_ID,
cls._bindSchema.RESOURCE_ID,
+ cls._bindSchema.EXTERNAL_ID,
cls._bindSchema.RESOURCE_NAME,
cls._bindSchema.BIND_STATUS,
cls._bindSchema.BIND_REVISION,
cls._bindSchema.MESSAGE
)
- bindColumnCount = 7
+ bindColumnCount = 8
@classmethod
def additionalBindColumns(cls):
@@ -3640,6 +3981,7 @@
yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForHomeChildMetaData(self._resourceID))
yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithName(self._home._resourceID, self._name))
yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID))
+ yield queryCacher.invalidateAfterCommit(self._txn, queryCacher.keyForObjectWithExternalID(self._home._resourceID, self._externalID))
@@ -3665,11 +4007,12 @@
_objectSchema = None
- def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None):
+ def __init__(self, home, name, resourceID, mode, status, revision=0, message=None, ownerHome=None, ownerName=None, externalID=None):
self._home = home
self._name = name
self._resourceID = resourceID
+ self._externalID = externalID
self._bindMode = mode
self._bindStatus = status
self._bindRevision = revision
@@ -3752,7 +4095,7 @@
# Create the actual objects merging in properties
for dataRow in dataRows:
- bindMode, homeID, resourceID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, bindName, bindStatus, bindRevision, bindMessage = dataRow[:cls.bindColumnCount] #@UnusedVariable
additionalBind = dataRow[cls.bindColumnCount:cls.bindColumnCount + len(cls.additionalBindColumns())]
metadata = dataRow[cls.bindColumnCount + len(cls.additionalBindColumns()):]
@@ -3773,6 +4116,7 @@
message=bindMessage,
ownerHome=ownerHome,
ownerName=ownerName,
+ externalID=externalID,
)
for attr, value in zip(cls.additionalBindAttributes(), additionalBind):
setattr(child, attr, value)
@@ -3800,8 +4144,13 @@
@classmethod
+ def objectWithExternalID(cls, home, externalID, accepted=True):
+ return cls._objectWithNameOrID(home, externalID=externalID, accepted=accepted)
+
+
+ @classmethod
@inlineCallbacks
- def _objectWithNameOrID(cls, home, name=None, resourceID=None, accepted=True):
+ def _objectWithNameOrID(cls, home, name=None, resourceID=None, externalID=None, accepted=True):
# replaces objectWithName()
"""
Retrieve the child with the given C{name} or C{resourceID} contained in the given
@@ -3821,27 +4170,32 @@
# Retrieve data from cache
if name:
cacheKey = queryCacher.keyForObjectWithName(home._resourceID, name)
- else:
+ elif resourceID:
cacheKey = queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID)
+ elif externalID:
+ cacheKey = queryCacher.keyForObjectWithExternalID(home._resourceID, externalID)
rows = yield queryCacher.get(cacheKey)
if rows is None:
# No cached copy
if name:
rows = yield cls._bindForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)
- else:
+ elif resourceID:
rows = yield cls._bindForResourceIDAndHomeID.on(home._txn, resourceID=resourceID, homeID=home._resourceID)
+ elif externalID:
+ rows = yield cls._bindForExternalIDAndHomeID.on(home._txn, externalID=externalID, homeID=home._resourceID)
if not rows:
returnValue(None)
row = rows[0]
- bindMode, homeID, resourceID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
+ bindMode, homeID, resourceID, externalID, name, bindStatus, bindRevision, bindMessage = row[:cls.bindColumnCount] #@UnusedVariable
if queryCacher:
# Cache the result
queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithName(home._resourceID, name), rows)
queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithResourceID(home._resourceID, resourceID), rows)
+ queryCacher.setAfterCommit(home._txn, queryCacher.keyForObjectWithExternalID(home._resourceID, externalID), rows)
if accepted is not None and (bindStatus == _BIND_STATUS_ACCEPTED) != bool(accepted):
returnValue(None)
@@ -3862,7 +4216,8 @@
revision=bindRevision,
message=bindMessage,
ownerHome=ownerHome,
- ownerName=ownerName
+ ownerName=ownerName,
+ externalID=externalID,
)
yield child.initFromStore(additionalBind)
returnValue(child)
@@ -3892,35 +4247,28 @@
@classmethod
@inlineCallbacks
- def create(cls, home, name):
+ def create(cls, home, name, externalID=None):
- if (yield cls._bindForNameAndHomeID.on(home._txn,
- name=name, homeID=home._resourceID)):
+ if (yield cls._bindForNameAndHomeID.on(home._txn, name=name, homeID=home._resourceID)):
raise HomeChildNameAlreadyExistsError(name)
if name.startswith("."):
raise HomeChildNameNotAllowedError(name)
# Create this object
- resourceID = (
- yield cls._insertHomeChild.on(home._txn))[0][0]
+ resourceID = (yield cls._insertHomeChild.on(home._txn))[0][0]
# Initialize this object
- _created, _modified = (
- yield cls._insertHomeChildMetaData.on(home._txn,
- resourceID=resourceID))[0]
+ _created, _modified = (yield cls._insertHomeChildMetaData.on(home._txn, resourceID=resourceID))[0]
# Bind table needs entry
yield cls._bindInsertQuery.on(
- home._txn, homeID=home._resourceID, resourceID=resourceID,
+ home._txn, homeID=home._resourceID, resourceID=resourceID, externalID=externalID,
name=name, mode=_BIND_MODE_OWN, bindStatus=_BIND_STATUS_ACCEPTED,
message=None,
)
# Initialize other state
- child = cls(home, name, resourceID, _BIND_MODE_OWN, _BIND_STATUS_ACCEPTED)
- child._created = _created
- child._modified = _modified
- yield child._loadPropertyStore()
+ child = yield cls.objectWithID(home, resourceID)
yield child._initSyncToken()
@@ -3981,6 +4329,15 @@
return self._resourceID
+ def external(self):
+ """
+ Is this an external home.
+
+ @return: a C{bool} — C{True} if the owner home is external.
+ """
+ return self.ownerHome().external()
+
+
@property
def _txn(self):
return self._home._txn
@@ -4030,12 +4387,7 @@
"""
oldName = self._name
- queryCacher = self._home._txn._queryCacher
- if queryCacher:
- cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, oldName)
- yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
- cacheKey = queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID)
- yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
+ yield self.invalidateQueryCache()
yield self._renameQuery.on(self._txn, name=name,
resourceID=self._resourceID,
@@ -4065,16 +4417,10 @@
# Do before setting _resourceID making changes
yield self.notifyPropertyChanged()
- queryCacher = self._home._txn._queryCacher
- if queryCacher:
- cacheKey = queryCacher.keyForObjectWithName(self._home._resourceID, self._name)
- yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
- cacheKey = queryCacher.keyForObjectWithResourceID(self._home._resourceID, self._resourceID)
- yield queryCacher.invalidateAfterCommit(self._home._txn, cacheKey)
+ yield self.invalidateQueryCache()
yield self._deletedSyncToken()
- yield self._deleteQuery.on(self._txn, NoSuchHomeChildError,
- resourceID=self._resourceID)
+ yield self._deleteQuery.on(self._txn, NoSuchHomeChildError, resourceID=self._resourceID)
yield self.properties()._removeResource()
# Set to non-existent state
@@ -5193,6 +5539,15 @@
resourceID = rows[0][0]
created = False
elif create:
+ # Determine if the user is local or external
+ record = txn.directoryService().recordWithUID(uid)
+ if record is None:
+ raise DirectoryRecordNotFoundError("Cannot create home for UID since no directory record exists: {uid}".format(uid=uid))
+
+ state = _HOME_STATUS_NORMAL if record.thisServer() else _HOME_STATUS_EXTERNAL
+ if state == _HOME_STATUS_EXTERNAL:
+ raise RecordNotAllowedError("Cannot store notifications for external user: {uid}".format(uid=uid))
+
# Use savepoint so we can do a partial rollback if there is a race
# condition where this row has already been inserted
savepoint = SavepointAction("notificationsWithUID")
@@ -5597,6 +5952,8 @@
child._notificationType = json.loads(child._notificationType)
except ValueError:
pass
+ if isinstance(child._notificationType, unicode):
+ child._notificationType = child._notificationType.encode("utf-8")
child._loadPropertyStore(
props=propertyStores.get(child._resourceID, None)
)
@@ -5645,6 +6002,8 @@
self._notificationType = json.loads(self._notificationType)
except ValueError:
pass
+ if isinstance(self._notificationType, unicode):
+ self._notificationType = self._notificationType.encode("utf-8")
self._loadPropertyStore()
returnValue(self)
else:
@@ -5758,6 +6117,8 @@
self._notificationData = json.loads(self._notificationData)
except ValueError:
pass
+ if isinstance(self._notificationData, unicode):
+ self._notificationData = self._notificationData.encode("utf-8")
returnValue(self._notificationData)
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current-oracle-dialect.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current-oracle-dialect.sql 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current-oracle-dialect.sql 2013-12-06 21:43:35 UTC (rev 12041)
@@ -18,9 +18,17 @@
create table CALENDAR_HOME (
"RESOURCE_ID" integer primary key,
"OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
"DATAVERSION" integer default 0 not null
);
+create table HOME_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
create table CALENDAR (
"RESOURCE_ID" integer primary key
);
@@ -50,6 +58,7 @@
create table NOTIFICATION_HOME (
"RESOURCE_ID" integer primary key,
"OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
"DATAVERSION" integer default 0 not null
);
@@ -210,6 +219,7 @@
"RESOURCE_ID" integer primary key,
"ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
"OWNER_UID" nvarchar2(255) unique,
+ "STATUS" integer default 0 not null,
"DATAVERSION" integer default 0 not null
);
@@ -370,7 +380,7 @@
"VALUE" nvarchar2(255)
);
-insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '29');
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '30');
insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current.sql 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/current.sql 2013-12-06 21:43:35 UTC (rev 12041)
@@ -53,10 +53,22 @@
create table CALENDAR_HOME (
RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
- OWNER_UID varchar(255) not null unique, -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
DATAVERSION integer default 0 not null
);
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
+
+
--------------
-- Calendar --
--------------
@@ -65,6 +77,7 @@
RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
);
+
----------------------------
-- Calendar Home Metadata --
----------------------------
@@ -91,6 +104,7 @@
create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
-----------------------
-- Calendar Metadata --
-----------------------
@@ -110,6 +124,7 @@
create table NOTIFICATION_HOME (
RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
DATAVERSION integer default 0 not null
);
@@ -139,6 +154,7 @@
create table CALENDAR_BIND (
CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ EXTERNAL_ID integer default null,
CALENDAR_RESOURCE_NAME varchar(255) not null,
BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
@@ -380,6 +396,7 @@
RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
ADDRESSBOOK_PROPERTY_STORE_ID integer default nextval('RESOURCE_ID_SEQ') not null, -- implicit index
OWNER_UID varchar(255) not null unique, -- implicit index
+ STATUS integer default 0 not null, -- enum HOME_STATUS
DATAVERSION integer default 0 not null
);
@@ -405,6 +422,7 @@
create table SHARED_ADDRESSBOOK_BIND (
ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
OWNER_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ EXTERNAL_ID integer default null,
ADDRESSBOOK_RESOURCE_NAME varchar(255) not null,
BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
@@ -495,6 +513,7 @@
create table SHARED_GROUP_BIND (
ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
GROUP_RESOURCE_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ EXTERNAL_ID integer default null,
GROUP_ADDRESSBOOK_NAME varchar(255) not null,
BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
@@ -702,7 +721,7 @@
VALUE varchar(255)
);
-insert into CALENDARSERVER values ('VERSION', '29');
+insert into CALENDARSERVER values ('VERSION', '30');
insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/oracle-dialect/v29.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/oracle-dialect/v29.sql (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/oracle-dialect/v29.sql 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,505 @@
+create sequence RESOURCE_ID_SEQ;
+create sequence INSTANCE_ID_SEQ;
+create sequence ATTACHMENT_ID_SEQ;
+create sequence REVISION_SEQ;
+create sequence WORKITEM_SEQ;
+create table NODE_INFO (
+ "HOSTNAME" nvarchar2(255),
+ "PID" integer not null,
+ "PORT" integer not null,
+ "TIME" timestamp default CURRENT_TIMESTAMP at time zone 'UTC' not null,
+ primary key("HOSTNAME", "PORT")
+);
+
+create table NAMED_LOCK (
+ "LOCK_NAME" nvarchar2(255) primary key
+);
+
+create table CALENDAR_HOME (
+ "RESOURCE_ID" integer primary key,
+ "OWNER_UID" nvarchar2(255) unique,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table CALENDAR (
+ "RESOURCE_ID" integer primary key
+);
+
+create table CALENDAR_HOME_METADATA (
+ "RESOURCE_ID" integer primary key references CALENDAR_HOME on delete cascade,
+ "QUOTA_USED_BYTES" integer default 0 not null,
+ "DEFAULT_EVENTS" integer default null references CALENDAR on delete set null,
+ "DEFAULT_TASKS" integer default null references CALENDAR on delete set null,
+ "DEFAULT_POLLS" integer default null references CALENDAR on delete set null,
+ "ALARM_VEVENT_TIMED" nclob default null,
+ "ALARM_VEVENT_ALLDAY" nclob default null,
+ "ALARM_VTODO_TIMED" nclob default null,
+ "ALARM_VTODO_ALLDAY" nclob default null,
+ "AVAILABILITY" nclob default null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_METADATA (
+ "RESOURCE_ID" integer primary key references CALENDAR on delete cascade,
+ "SUPPORTED_COMPONENTS" nvarchar2(255) default null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table NOTIFICATION_HOME (
+ "RESOURCE_ID" integer primary key,
+ "OWNER_UID" nvarchar2(255) unique,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table NOTIFICATION (
+ "RESOURCE_ID" integer primary key,
+ "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME,
+ "NOTIFICATION_UID" nvarchar2(255),
+ "NOTIFICATION_TYPE" nvarchar2(255),
+ "NOTIFICATION_DATA" nclob,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("NOTIFICATION_UID", "NOTIFICATION_HOME_RESOURCE_ID")
+);
+
+create table CALENDAR_BIND (
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "CALENDAR_RESOURCE_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ "TRANSP" integer default 0 not null,
+ "ALARM_VEVENT_TIMED" nclob default null,
+ "ALARM_VEVENT_ALLDAY" nclob default null,
+ "ALARM_VTODO_TIMED" nclob default null,
+ "ALARM_VTODO_ALLDAY" nclob default null,
+ "TIMEZONE" nclob default null,
+ primary key("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_ID"),
+ unique("CALENDAR_HOME_RESOURCE_ID", "CALENDAR_RESOURCE_NAME")
+);
+
+create table CALENDAR_BIND_MODE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('own', 0);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('write', 2);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('direct', 3);
+insert into CALENDAR_BIND_MODE (DESCRIPTION, ID) values ('indirect', 4);
+create table CALENDAR_BIND_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invited', 0);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('accepted', 1);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('declined', 2);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('invalid', 3);
+insert into CALENDAR_BIND_STATUS (DESCRIPTION, ID) values ('deleted', 4);
+create table CALENDAR_TRANSP (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('opaque', 0);
+insert into CALENDAR_TRANSP (DESCRIPTION, ID) values ('transparent', 1);
+create table CALENDAR_OBJECT (
+ "RESOURCE_ID" integer primary key,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob,
+ "ICALENDAR_UID" nvarchar2(255),
+ "ICALENDAR_TYPE" nvarchar2(255),
+ "ATTACHMENTS_MODE" integer default 0 not null,
+ "DROPBOX_ID" nvarchar2(255),
+ "ORGANIZER" nvarchar2(255),
+ "RECURRANCE_MIN" date,
+ "RECURRANCE_MAX" date,
+ "ACCESS" integer default 0 not null,
+ "SCHEDULE_OBJECT" integer default 0,
+ "SCHEDULE_TAG" nvarchar2(36) default null,
+ "SCHEDULE_ETAGS" nclob default null,
+ "PRIVATE_COMMENTS" integer default 0 not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("CALENDAR_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MO (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('none', 0);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('read', 1);
+insert into CALENDAR_OBJECT_ATTACHMENTS_MO (DESCRIPTION, ID) values ('write', 2);
+create table CALENDAR_ACCESS_TYPE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(32) unique
+);
+
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('', 0);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('public', 1);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('private', 2);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('confidential', 3);
+insert into CALENDAR_ACCESS_TYPE (DESCRIPTION, ID) values ('restricted', 4);
+create table TIME_RANGE (
+ "INSTANCE_ID" integer primary key,
+ "CALENDAR_RESOURCE_ID" integer not null references CALENDAR on delete cascade,
+ "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ "FLOATING" integer not null,
+ "START_DATE" timestamp not null,
+ "END_DATE" timestamp not null,
+ "FBTYPE" integer not null,
+ "TRANSPARENT" integer not null
+);
+
+create table FREE_BUSY_TYPE (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('unknown', 0);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('free', 1);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy', 2);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-unavailable', 3);
+insert into FREE_BUSY_TYPE (DESCRIPTION, ID) values ('busy-tentative', 4);
+create table TRANSPARENCY (
+ "TIME_RANGE_INSTANCE_ID" integer not null references TIME_RANGE on delete cascade,
+ "USER_ID" nvarchar2(255),
+ "TRANSPARENT" integer not null
+);
+
+create table ATTACHMENT (
+ "ATTACHMENT_ID" integer primary key,
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "DROPBOX_ID" nvarchar2(255),
+ "CONTENT_TYPE" nvarchar2(255),
+ "SIZE" integer not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "PATH" nvarchar2(1024)
+);
+
+create table ATTACHMENT_CALENDAR_OBJECT (
+ "ATTACHMENT_ID" integer not null references ATTACHMENT on delete cascade,
+ "MANAGED_ID" nvarchar2(255),
+ "CALENDAR_OBJECT_RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade,
+ primary key("ATTACHMENT_ID", "CALENDAR_OBJECT_RESOURCE_ID"),
+ unique("MANAGED_ID", "CALENDAR_OBJECT_RESOURCE_ID")
+);
+
+create table RESOURCE_PROPERTY (
+ "RESOURCE_ID" integer not null,
+ "NAME" nvarchar2(255),
+ "VALUE" nclob,
+ "VIEWER_UID" nvarchar2(255),
+ primary key("RESOURCE_ID", "NAME", "VIEWER_UID")
+);
+
+create table ADDRESSBOOK_HOME (
+ "RESOURCE_ID" integer primary key,
+ "ADDRESSBOOK_PROPERTY_STORE_ID" integer not null,
+ "OWNER_UID" nvarchar2(255) unique,
+ "DATAVERSION" integer default 0 not null
+);
+
+create table ADDRESSBOOK_HOME_METADATA (
+ "RESOURCE_ID" integer primary key references ADDRESSBOOK_HOME on delete cascade,
+ "QUOTA_USED_BYTES" integer default 0 not null,
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table SHARED_ADDRESSBOOK_BIND (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "OWNER_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "ADDRESSBOOK_RESOURCE_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "OWNER_HOME_RESOURCE_ID"),
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "ADDRESSBOOK_RESOURCE_NAME")
+);
+
+create table ADDRESSBOOK_OBJECT (
+ "RESOURCE_ID" integer primary key,
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "VCARD_TEXT" nclob,
+ "VCARD_UID" nvarchar2(255),
+ "KIND" integer not null,
+ "MD5" nchar(32),
+ "CREATED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "MODIFIED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "RESOURCE_NAME"),
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "VCARD_UID")
+);
+
+create table ADDRESSBOOK_OBJECT_KIND (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('person', 0);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('group', 1);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('resource', 2);
+insert into ADDRESSBOOK_OBJECT_KIND (DESCRIPTION, ID) values ('location', 3);
+create table ABO_MEMBERS (
+ "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "MEMBER_ID" integer not null references ADDRESSBOOK_OBJECT,
+ primary key("GROUP_ID", "MEMBER_ID")
+);
+
+create table ABO_FOREIGN_MEMBERS (
+ "GROUP_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "ADDRESSBOOK_ID" integer not null references ADDRESSBOOK_HOME on delete cascade,
+ "MEMBER_ADDRESS" nvarchar2(255),
+ primary key("GROUP_ID", "MEMBER_ADDRESS")
+);
+
+create table SHARED_GROUP_BIND (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "GROUP_RESOURCE_ID" integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ "GROUP_ADDRESSBOOK_NAME" nvarchar2(255),
+ "BIND_MODE" integer not null,
+ "BIND_STATUS" integer not null,
+ "BIND_REVISION" integer default 0 not null,
+ "MESSAGE" nclob,
+ primary key("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_RESOURCE_ID"),
+ unique("ADDRESSBOOK_HOME_RESOURCE_ID", "GROUP_ADDRESSBOOK_NAME")
+);
+
+create table CALENDAR_OBJECT_REVISIONS (
+ "CALENDAR_HOME_RESOURCE_ID" integer not null references CALENDAR_HOME,
+ "CALENDAR_RESOURCE_ID" integer references CALENDAR,
+ "CALENDAR_NAME" nvarchar2(255) default null,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null
+);
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+ "ADDRESSBOOK_HOME_RESOURCE_ID" integer not null references ADDRESSBOOK_HOME,
+ "OWNER_HOME_RESOURCE_ID" integer references ADDRESSBOOK_HOME,
+ "ADDRESSBOOK_NAME" nvarchar2(255) default null,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null
+);
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+ "NOTIFICATION_HOME_RESOURCE_ID" integer not null references NOTIFICATION_HOME on delete cascade,
+ "RESOURCE_NAME" nvarchar2(255),
+ "REVISION" integer not null,
+ "DELETED" integer not null,
+ unique("NOTIFICATION_HOME_RESOURCE_ID", "RESOURCE_NAME")
+);
+
+create table APN_SUBSCRIPTIONS (
+ "TOKEN" nvarchar2(255),
+ "RESOURCE_KEY" nvarchar2(255),
+ "MODIFIED" integer not null,
+ "SUBSCRIBER_GUID" nvarchar2(255),
+ "USER_AGENT" nvarchar2(255) default null,
+ "IP_ADDR" nvarchar2(255) default null,
+ primary key("TOKEN", "RESOURCE_KEY")
+);
+
+create table IMIP_TOKENS (
+ "TOKEN" nvarchar2(255),
+ "ORGANIZER" nvarchar2(255),
+ "ATTENDEE" nvarchar2(255),
+ "ICALUID" nvarchar2(255),
+ "ACCESSED" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ primary key("ORGANIZER", "ATTENDEE", "ICALUID")
+);
+
+create table IMIP_INVITATION_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "FROM_ADDR" nvarchar2(255),
+ "TO_ADDR" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob
+);
+
+create table IMIP_POLLING_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table IMIP_REPLY_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "ORGANIZER" nvarchar2(255),
+ "ATTENDEE" nvarchar2(255),
+ "ICALENDAR_TEXT" nclob
+);
+
+create table PUSH_NOTIFICATION_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "PUSH_ID" nvarchar2(255),
+ "PRIORITY" integer not null
+);
+
+create table GROUP_CACHER_POLLING_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
+);
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+ "WORK_ID" integer primary key not null,
+ "NOT_BEFORE" timestamp default CURRENT_TIMESTAMP at time zone 'UTC',
+ "RESOURCE_ID" integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create table CALENDARSERVER (
+ "NAME" nvarchar2(255) primary key,
+ "VALUE" nvarchar2(255)
+);
+
+insert into CALENDARSERVER (NAME, VALUE) values ('VERSION', '29');
+insert into CALENDARSERVER (NAME, VALUE) values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER (NAME, VALUE) values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER (NAME, VALUE) values ('NOTIFICATION-DATAVERSION', '1');
+create index CALENDAR_HOME_METADAT_3cb9049e on CALENDAR_HOME_METADATA (
+ DEFAULT_EVENTS
+);
+
+create index CALENDAR_HOME_METADAT_d55e5548 on CALENDAR_HOME_METADATA (
+ DEFAULT_TASKS
+);
+
+create index CALENDAR_HOME_METADAT_910264ce on CALENDAR_HOME_METADATA (
+ DEFAULT_POLLS
+);
+
+create index NOTIFICATION_NOTIFICA_f891f5f9 on NOTIFICATION (
+ NOTIFICATION_HOME_RESOURCE_ID
+);
+
+create index CALENDAR_BIND_RESOURC_e57964d4 on CALENDAR_BIND (
+ CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_CALEN_a9a453a9 on CALENDAR_OBJECT (
+ CALENDAR_RESOURCE_ID,
+ ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_CALEN_96e83b73 on CALENDAR_OBJECT (
+ CALENDAR_RESOURCE_ID,
+ RECURRANCE_MAX
+);
+
+create index CALENDAR_OBJECT_ICALE_82e731d5 on CALENDAR_OBJECT (
+ ICALENDAR_UID
+);
+
+create index CALENDAR_OBJECT_DROPB_de041d80 on CALENDAR_OBJECT (
+ DROPBOX_ID
+);
+
+create index TIME_RANGE_CALENDAR_R_beb6e7eb on TIME_RANGE (
+ CALENDAR_RESOURCE_ID
+);
+
+create index TIME_RANGE_CALENDAR_O_acf37bd1 on TIME_RANGE (
+ CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index TRANSPARENCY_TIME_RAN_5f34467f on TRANSPARENCY (
+ TIME_RANGE_INSTANCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_H_0078845c on ATTACHMENT (
+ CALENDAR_HOME_RESOURCE_ID
+);
+
+create index ATTACHMENT_CALENDAR_O_81508484 on ATTACHMENT_CALENDAR_OBJECT (
+ CALENDAR_OBJECT_RESOURCE_ID
+);
+
+create index SHARED_ADDRESSBOOK_BI_e9a2e6d4 on SHARED_ADDRESSBOOK_BIND (
+ OWNER_HOME_RESOURCE_ID
+);
+
+create index ABO_MEMBERS_ADDRESSBO_4effa879 on ABO_MEMBERS (
+ ADDRESSBOOK_ID
+);
+
+create index ABO_MEMBERS_MEMBER_ID_8d66adcf on ABO_MEMBERS (
+ MEMBER_ID
+);
+
+create index ABO_FOREIGN_MEMBERS_A_1fd2c5e9 on ABO_FOREIGN_MEMBERS (
+ ADDRESSBOOK_ID
+);
+
+create index SHARED_GROUP_BIND_RES_cf52f95d on SHARED_GROUP_BIND (
+ GROUP_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_3a3956c4 on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_HOME_RESOURCE_ID,
+ CALENDAR_RESOURCE_ID
+);
+
+create index CALENDAR_OBJECT_REVIS_6d9d929c on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_RESOURCE_ID,
+ RESOURCE_NAME,
+ DELETED,
+ REVISION
+);
+
+create index CALENDAR_OBJECT_REVIS_265c8acf on CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_RESOURCE_ID,
+ REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_2bfcf757 on ADDRESSBOOK_OBJECT_REVISIONS (
+ ADDRESSBOOK_HOME_RESOURCE_ID,
+ OWNER_HOME_RESOURCE_ID
+);
+
+create index ADDRESSBOOK_OBJECT_RE_00fe8288 on ADDRESSBOOK_OBJECT_REVISIONS (
+ OWNER_HOME_RESOURCE_ID,
+ RESOURCE_NAME,
+ DELETED,
+ REVISION
+);
+
+create index ADDRESSBOOK_OBJECT_RE_45004780 on ADDRESSBOOK_OBJECT_REVISIONS (
+ OWNER_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index NOTIFICATION_OBJECT_R_036a9cee on NOTIFICATION_OBJECT_REVISIONS (
+ NOTIFICATION_HOME_RESOURCE_ID,
+ REVISION
+);
+
+create index APN_SUBSCRIPTIONS_RES_9610d78e on APN_SUBSCRIPTIONS (
+ RESOURCE_KEY
+);
+
+create index IMIP_TOKENS_TOKEN_e94b918f on IMIP_TOKENS (
+ TOKEN
+);
+
+create index CALENDAR_OBJECT_SPLIT_af71dcda on CALENDAR_OBJECT_SPLITTER_WORK (
+ RESOURCE_ID
+);
+
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/postgres-dialect/v29.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/postgres-dialect/v29.sql (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/old/postgres-dialect/v29.sql 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,708 @@
+-- -*- test-case-name: txdav.caldav.datastore.test.test_sql,txdav.carddav.datastore.test.test_sql -*-
+
+----
+-- Copyright (c) 2010-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+
+-----------------
+-- Resource ID --
+-----------------
+
+create sequence RESOURCE_ID_SEQ;
+
+
+-------------------------
+-- Cluster Bookkeeping --
+-------------------------
+
+-- Information about a process connected to this database.
+
+-- Note that this must match the node info schema in twext.enterprise.queue.
+create table NODE_INFO (
+ HOSTNAME varchar(255) not null,
+ PID integer not null,
+ PORT integer not null,
+ TIME timestamp not null default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (HOSTNAME, PORT)
+);
+
+-- Unique named locks. This table should always be empty, but rows are
+-- temporarily created in order to prevent undesirable concurrency.
+create table NAMED_LOCK (
+ LOCK_NAME varchar(255) primary key
+);
+
+
+-------------------
+-- Calendar Home --
+-------------------
+
+create table CALENDAR_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ DATAVERSION integer default 0 not null
+);
+
+--------------
+-- Calendar --
+--------------
+
+create table CALENDAR (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ') -- implicit index
+);
+
+----------------------------
+-- Calendar Home Metadata --
+----------------------------
+
+create table CALENDAR_HOME_METADATA (
+ RESOURCE_ID integer primary key references CALENDAR_HOME on delete cascade, -- implicit index
+ QUOTA_USED_BYTES integer default 0 not null,
+ DEFAULT_EVENTS integer default null references CALENDAR on delete set null,
+ DEFAULT_TASKS integer default null references CALENDAR on delete set null,
+ DEFAULT_POLLS integer default null references CALENDAR on delete set null,
+ ALARM_VEVENT_TIMED text default null,
+ ALARM_VEVENT_ALLDAY text default null,
+ ALARM_VTODO_TIMED text default null,
+ ALARM_VTODO_ALLDAY text default null,
+ AVAILABILITY text default null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+create index CALENDAR_HOME_METADATA_DEFAULT_EVENTS on
+ CALENDAR_HOME_METADATA(DEFAULT_EVENTS);
+create index CALENDAR_HOME_METADATA_DEFAULT_TASKS on
+ CALENDAR_HOME_METADATA(DEFAULT_TASKS);
+create index CALENDAR_HOME_METADATA_DEFAULT_POLLS on
+ CALENDAR_HOME_METADATA(DEFAULT_POLLS);
+
+-----------------------
+-- Calendar Metadata --
+-----------------------
+
+create table CALENDAR_METADATA (
+ RESOURCE_ID integer primary key references CALENDAR on delete cascade, -- implicit index
+ SUPPORTED_COMPONENTS varchar(255) default null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------------
+-- Sharing Notifications --
+---------------------------
+
+create table NOTIFICATION_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ DATAVERSION integer default 0 not null
+);
+
+create table NOTIFICATION (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ NOTIFICATION_HOME_RESOURCE_ID integer not null references NOTIFICATION_HOME,
+ NOTIFICATION_UID varchar(255) not null,
+ NOTIFICATION_TYPE varchar(255) not null,
+ NOTIFICATION_DATA text not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique(NOTIFICATION_UID, NOTIFICATION_HOME_RESOURCE_ID) -- implicit index
+);
+
+create index NOTIFICATION_NOTIFICATION_HOME_RESOURCE_ID on
+ NOTIFICATION(NOTIFICATION_HOME_RESOURCE_ID);
+
+
+-------------------
+-- Calendar Bind --
+-------------------
+
+-- Joins CALENDAR_HOME and CALENDAR
+
+create table CALENDAR_BIND (
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ CALENDAR_RESOURCE_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION integer default 0 not null,
+ MESSAGE text,
+ TRANSP integer default 0 not null, -- enum CALENDAR_TRANSP
+ ALARM_VEVENT_TIMED text default null,
+ ALARM_VEVENT_ALLDAY text default null,
+ ALARM_VTODO_TIMED text default null,
+ ALARM_VTODO_ALLDAY text default null,
+ TIMEZONE text default null,
+
+ primary key(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID), -- implicit index
+ unique(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_NAME) -- implicit index
+);
+
+create index CALENDAR_BIND_RESOURCE_ID on
+ CALENDAR_BIND(CALENDAR_RESOURCE_ID);
+
+-- Enumeration of calendar bind modes
+
+create table CALENDAR_BIND_MODE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_MODE values (0, 'own' );
+insert into CALENDAR_BIND_MODE values (1, 'read' );
+insert into CALENDAR_BIND_MODE values (2, 'write');
+insert into CALENDAR_BIND_MODE values (3, 'direct');
+insert into CALENDAR_BIND_MODE values (4, 'indirect');
+
+-- Enumeration of statuses
+
+create table CALENDAR_BIND_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_BIND_STATUS values (0, 'invited' );
+insert into CALENDAR_BIND_STATUS values (1, 'accepted');
+insert into CALENDAR_BIND_STATUS values (2, 'declined');
+insert into CALENDAR_BIND_STATUS values (3, 'invalid');
+insert into CALENDAR_BIND_STATUS values (4, 'deleted');
+
+
+-- Enumeration of transparency
+
+create table CALENDAR_TRANSP (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_TRANSP values (0, 'opaque' );
+insert into CALENDAR_TRANSP values (1, 'transparent');
+
+
+---------------------
+-- Calendar Object --
+---------------------
+
+create table CALENDAR_OBJECT (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ RESOURCE_NAME varchar(255) not null,
+ ICALENDAR_TEXT text not null,
+ ICALENDAR_UID varchar(255) not null,
+ ICALENDAR_TYPE varchar(255) not null,
+ ATTACHMENTS_MODE integer default 0 not null, -- enum CALENDAR_OBJECT_ATTACHMENTS_MODE
+ DROPBOX_ID varchar(255),
+ ORGANIZER varchar(255),
+ RECURRANCE_MIN date, -- minimum date that recurrences have been expanded to.
+ RECURRANCE_MAX date, -- maximum date that recurrences have been expanded to.
+ ACCESS integer default 0 not null,
+ SCHEDULE_OBJECT boolean default false,
+ SCHEDULE_TAG varchar(36) default null,
+ SCHEDULE_ETAGS text default null,
+ PRIVATE_COMMENTS boolean default false not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique (CALENDAR_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+
+ -- since the 'inbox' is a 'calendar resource' for the purpose of storing
+ -- calendar objects, this constraint has to be selectively enforced by the
+ -- application layer.
+
+ -- unique(CALENDAR_RESOURCE_ID, ICALENDAR_UID)
+);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_AND_ICALENDAR_UID on
+ CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_CALENDAR_RESOURCE_ID_RECURRANCE_MAX on
+ CALENDAR_OBJECT(CALENDAR_RESOURCE_ID, RECURRANCE_MAX);
+
+create index CALENDAR_OBJECT_ICALENDAR_UID on
+ CALENDAR_OBJECT(ICALENDAR_UID);
+
+create index CALENDAR_OBJECT_DROPBOX_ID on
+ CALENDAR_OBJECT(DROPBOX_ID);
+
+-- Enumeration of attachment modes
+
+create table CALENDAR_OBJECT_ATTACHMENTS_MODE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (0, 'none' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (1, 'read' );
+insert into CALENDAR_OBJECT_ATTACHMENTS_MODE values (2, 'write');
+
+
+-- Enumeration of calendar access types
+
+create table CALENDAR_ACCESS_TYPE (
+ ID integer primary key,
+ DESCRIPTION varchar(32) not null unique
+);
+
+insert into CALENDAR_ACCESS_TYPE values (0, '' );
+insert into CALENDAR_ACCESS_TYPE values (1, 'public' );
+insert into CALENDAR_ACCESS_TYPE values (2, 'private' );
+insert into CALENDAR_ACCESS_TYPE values (3, 'confidential' );
+insert into CALENDAR_ACCESS_TYPE values (4, 'restricted' );
+
+
+-----------------
+-- Instance ID --
+-----------------
+
+create sequence INSTANCE_ID_SEQ;
+
+
+----------------
+-- Time Range --
+----------------
+
+create table TIME_RANGE (
+ INSTANCE_ID integer primary key default nextval('INSTANCE_ID_SEQ'), -- implicit index
+ CALENDAR_RESOURCE_ID integer not null references CALENDAR on delete cascade,
+ CALENDAR_OBJECT_RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+ FLOATING boolean not null,
+ START_DATE timestamp not null,
+ END_DATE timestamp not null,
+ FBTYPE integer not null,
+ TRANSPARENT boolean not null
+);
+
+create index TIME_RANGE_CALENDAR_RESOURCE_ID on
+ TIME_RANGE(CALENDAR_RESOURCE_ID);
+create index TIME_RANGE_CALENDAR_OBJECT_RESOURCE_ID on
+ TIME_RANGE(CALENDAR_OBJECT_RESOURCE_ID);
+
+
+-- Enumeration of free/busy types
+
+create table FREE_BUSY_TYPE (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into FREE_BUSY_TYPE values (0, 'unknown' );
+insert into FREE_BUSY_TYPE values (1, 'free' );
+insert into FREE_BUSY_TYPE values (2, 'busy' );
+insert into FREE_BUSY_TYPE values (3, 'busy-unavailable');
+insert into FREE_BUSY_TYPE values (4, 'busy-tentative' );
+
+
+------------------
+-- Transparency --
+------------------
+
+create table TRANSPARENCY (
+ TIME_RANGE_INSTANCE_ID integer not null references TIME_RANGE on delete cascade,
+ USER_ID varchar(255) not null,
+ TRANSPARENT boolean not null
+);
+
+create index TRANSPARENCY_TIME_RANGE_INSTANCE_ID on
+ TRANSPARENCY(TIME_RANGE_INSTANCE_ID);
+
+
+----------------
+-- Attachment --
+----------------
+
+create sequence ATTACHMENT_ID_SEQ;
+
+create table ATTACHMENT (
+ ATTACHMENT_ID integer primary key default nextval('ATTACHMENT_ID_SEQ'), -- implicit index
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ DROPBOX_ID varchar(255),
+ CONTENT_TYPE varchar(255) not null,
+ SIZE integer not null,
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ PATH varchar(1024) not null
+);
+
+create index ATTACHMENT_CALENDAR_HOME_RESOURCE_ID on
+ ATTACHMENT(CALENDAR_HOME_RESOURCE_ID);
+
+-- Many-to-many relationship between attachments and calendar objects
+create table ATTACHMENT_CALENDAR_OBJECT (
+ ATTACHMENT_ID integer not null references ATTACHMENT on delete cascade,
+ MANAGED_ID varchar(255) not null,
+ CALENDAR_OBJECT_RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade,
+
+ primary key (ATTACHMENT_ID, CALENDAR_OBJECT_RESOURCE_ID), -- implicit index
+  unique (MANAGED_ID, CALENDAR_OBJECT_RESOURCE_ID) -- implicit index
+);
+
+create index ATTACHMENT_CALENDAR_OBJECT_CALENDAR_OBJECT_RESOURCE_ID on
+ ATTACHMENT_CALENDAR_OBJECT(CALENDAR_OBJECT_RESOURCE_ID);
+
+-----------------------
+-- Resource Property --
+-----------------------
+
+create table RESOURCE_PROPERTY (
+ RESOURCE_ID integer not null, -- foreign key: *.RESOURCE_ID
+ NAME varchar(255) not null,
+ VALUE text not null, -- FIXME: xml?
+ VIEWER_UID varchar(255),
+
+ primary key (RESOURCE_ID, NAME, VIEWER_UID) -- implicit index
+);
+
+
+----------------------
+-- AddressBook Home --
+----------------------
+
+create table ADDRESSBOOK_HOME (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ ADDRESSBOOK_PROPERTY_STORE_ID integer default nextval('RESOURCE_ID_SEQ') not null, -- implicit index
+ OWNER_UID varchar(255) not null unique, -- implicit index
+ DATAVERSION integer default 0 not null
+);
+
+
+-------------------------------
+-- AddressBook Home Metadata --
+-------------------------------
+
+create table ADDRESSBOOK_HOME_METADATA (
+ RESOURCE_ID integer primary key references ADDRESSBOOK_HOME on delete cascade, -- implicit index
+ QUOTA_USED_BYTES integer default 0 not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+-----------------------------
+-- Shared AddressBook Bind --
+-----------------------------
+
+-- Joins sharee ADDRESSBOOK_HOME and owner ADDRESSBOOK_HOME
+
+create table SHARED_ADDRESSBOOK_BIND (
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
+ OWNER_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ ADDRESSBOOK_RESOURCE_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION integer default 0 not null,
+ MESSAGE text, -- FIXME: xml?
+
+ primary key (ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, ADDRESSBOOK_RESOURCE_NAME) -- implicit index
+);
+
+create index SHARED_ADDRESSBOOK_BIND_RESOURCE_ID on
+ SHARED_ADDRESSBOOK_BIND(OWNER_HOME_RESOURCE_ID);
+
+
+------------------------
+-- AddressBook Object --
+------------------------
+
+create table ADDRESSBOOK_OBJECT (
+ RESOURCE_ID integer primary key default nextval('RESOURCE_ID_SEQ'), -- implicit index
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ RESOURCE_NAME varchar(255) not null,
+ VCARD_TEXT text not null,
+ VCARD_UID varchar(255) not null,
+ KIND integer not null, -- enum ADDRESSBOOK_OBJECT_KIND
+ MD5 char(32) not null,
+ CREATED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ MODIFIED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, RESOURCE_NAME), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, VCARD_UID) -- implicit index
+);
+
+
+-----------------------------
+-- AddressBook Object kind --
+-----------------------------
+
+create table ADDRESSBOOK_OBJECT_KIND (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into ADDRESSBOOK_OBJECT_KIND values (0, 'person');
+insert into ADDRESSBOOK_OBJECT_KIND values (1, 'group' );
+insert into ADDRESSBOOK_OBJECT_KIND values (2, 'resource');
+insert into ADDRESSBOOK_OBJECT_KIND values (3, 'location');
+
+
+---------------------------------
+-- Address Book Object Members --
+---------------------------------
+
+create table ABO_MEMBERS (
+ GROUP_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade, -- AddressBook Object's (kind=='group') RESOURCE_ID
+ ADDRESSBOOK_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ MEMBER_ID integer not null references ADDRESSBOOK_OBJECT, -- member AddressBook Object's RESOURCE_ID
+
+ primary key (GROUP_ID, MEMBER_ID) -- implicit index
+);
+
+create index ABO_MEMBERS_ADDRESSBOOK_ID on
+ ABO_MEMBERS(ADDRESSBOOK_ID);
+create index ABO_MEMBERS_MEMBER_ID on
+ ABO_MEMBERS(MEMBER_ID);
+
+------------------------------------------
+-- Address Book Object Foreign Members --
+------------------------------------------
+
+create table ABO_FOREIGN_MEMBERS (
+ GROUP_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade, -- AddressBook Object's (kind=='group') RESOURCE_ID
+ ADDRESSBOOK_ID integer not null references ADDRESSBOOK_HOME on delete cascade,
+ MEMBER_ADDRESS varchar(255) not null, -- member AddressBook Object's 'calendar' address
+
+ primary key (GROUP_ID, MEMBER_ADDRESS) -- implicit index
+);
+
+create index ABO_FOREIGN_MEMBERS_ADDRESSBOOK_ID on
+ ABO_FOREIGN_MEMBERS(ADDRESSBOOK_ID);
+
+-----------------------
+-- Shared Group Bind --
+-----------------------
+
+-- Joins ADDRESSBOOK_HOME and ADDRESSBOOK_OBJECT (kind == group)
+
+create table SHARED_GROUP_BIND (
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
+ GROUP_RESOURCE_ID integer not null references ADDRESSBOOK_OBJECT on delete cascade,
+ GROUP_ADDRESSBOOK_NAME varchar(255) not null,
+ BIND_MODE integer not null, -- enum CALENDAR_BIND_MODE
+ BIND_STATUS integer not null, -- enum CALENDAR_BIND_STATUS
+ BIND_REVISION integer default 0 not null,
+ MESSAGE text, -- FIXME: xml?
+
+ primary key (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_RESOURCE_ID), -- implicit index
+ unique (ADDRESSBOOK_HOME_RESOURCE_ID, GROUP_ADDRESSBOOK_NAME) -- implicit index
+);
+
+create index SHARED_GROUP_BIND_RESOURCE_ID on
+ SHARED_GROUP_BIND(GROUP_RESOURCE_ID);
+
+
+---------------
+-- Revisions --
+---------------
+
+create sequence REVISION_SEQ;
+
+
+-------------------------------
+-- Calendar Object Revisions --
+-------------------------------
+
+create table CALENDAR_OBJECT_REVISIONS (
+ CALENDAR_HOME_RESOURCE_ID integer not null references CALENDAR_HOME,
+ CALENDAR_RESOURCE_ID integer references CALENDAR,
+ CALENDAR_NAME varchar(255) default null,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null
+);
+
+create index CALENDAR_OBJECT_REVISIONS_HOME_RESOURCE_ID_CALENDAR_RESOURCE_ID
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_HOME_RESOURCE_ID, CALENDAR_RESOURCE_ID);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index CALENDAR_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+ on CALENDAR_OBJECT_REVISIONS(CALENDAR_RESOURCE_ID, REVISION);
+
+
+----------------------------------
+-- AddressBook Object Revisions --
+----------------------------------
+
+create table ADDRESSBOOK_OBJECT_REVISIONS (
+ ADDRESSBOOK_HOME_RESOURCE_ID integer not null references ADDRESSBOOK_HOME,
+ OWNER_HOME_RESOURCE_ID integer references ADDRESSBOOK_HOME,
+ ADDRESSBOOK_NAME varchar(255) default null,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null
+);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_HOME_RESOURCE_ID_OWNER_HOME_RESOURCE_ID
+ on ADDRESSBOOK_OBJECT_REVISIONS(ADDRESSBOOK_HOME_RESOURCE_ID, OWNER_HOME_RESOURCE_ID);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_RESOURCE_NAME_DELETED_REVISION
+ on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, RESOURCE_NAME, DELETED, REVISION);
+
+create index ADDRESSBOOK_OBJECT_REVISIONS_OWNER_HOME_RESOURCE_ID_REVISION
+ on ADDRESSBOOK_OBJECT_REVISIONS(OWNER_HOME_RESOURCE_ID, REVISION);
+
+
+-----------------------------------
+-- Notification Object Revisions --
+-----------------------------------
+
+create table NOTIFICATION_OBJECT_REVISIONS (
+ NOTIFICATION_HOME_RESOURCE_ID integer not null references NOTIFICATION_HOME on delete cascade,
+ RESOURCE_NAME varchar(255),
+ REVISION integer default nextval('REVISION_SEQ') not null,
+ DELETED boolean not null,
+
+ unique(NOTIFICATION_HOME_RESOURCE_ID, RESOURCE_NAME) -- implicit index
+);
+
+create index NOTIFICATION_OBJECT_REVISIONS_RESOURCE_ID_REVISION
+ on NOTIFICATION_OBJECT_REVISIONS(NOTIFICATION_HOME_RESOURCE_ID, REVISION);
+
+
+-------------------------------------------
+-- Apple Push Notification Subscriptions --
+-------------------------------------------
+
+create table APN_SUBSCRIPTIONS (
+ TOKEN varchar(255) not null,
+ RESOURCE_KEY varchar(255) not null,
+ MODIFIED integer not null,
+ SUBSCRIBER_GUID varchar(255) not null,
+ USER_AGENT varchar(255) default null,
+ IP_ADDR varchar(255) default null,
+
+ primary key (TOKEN, RESOURCE_KEY) -- implicit index
+);
+
+create index APN_SUBSCRIPTIONS_RESOURCE_KEY
+ on APN_SUBSCRIPTIONS(RESOURCE_KEY);
+
+
+-----------------
+-- IMIP Tokens --
+-----------------
+
+create table IMIP_TOKENS (
+ TOKEN varchar(255) not null,
+ ORGANIZER varchar(255) not null,
+ ATTENDEE varchar(255) not null,
+ ICALUID varchar(255) not null,
+ ACCESSED timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+
+ primary key (ORGANIZER, ATTENDEE, ICALUID) -- implicit index
+);
+
+create index IMIP_TOKENS_TOKEN
+ on IMIP_TOKENS(TOKEN);
+
+
+----------------
+-- Work Items --
+----------------
+
+create sequence WORKITEM_SEQ;
+
+
+---------------------------
+-- IMIP Invitation Work --
+---------------------------
+
+create table IMIP_INVITATION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ FROM_ADDR varchar(255) not null,
+ TO_ADDR varchar(255) not null,
+ ICALENDAR_TEXT text not null
+);
+
+
+-----------------------
+-- IMIP Polling Work --
+-----------------------
+
+create table IMIP_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+---------------------
+-- IMIP Reply Work --
+---------------------
+
+create table IMIP_REPLY_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ ORGANIZER varchar(255) not null,
+ ATTENDEE varchar(255) not null,
+ ICALENDAR_TEXT text not null
+);
+
+
+------------------------
+-- Push Notifications --
+------------------------
+
+create table PUSH_NOTIFICATION_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ PUSH_ID varchar(255) not null,
+ PRIORITY integer not null -- 1:low 5:medium 10:high
+);
+
+-----------------
+-- GroupCacher --
+-----------------
+
+create table GROUP_CACHER_POLLING_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP)
+);
+
+
+--------------------------
+-- Object Splitter Work --
+--------------------------
+
+create table CALENDAR_OBJECT_SPLITTER_WORK (
+ WORK_ID integer primary key default nextval('WORKITEM_SEQ') not null, -- implicit index
+ NOT_BEFORE timestamp default timezone('UTC', CURRENT_TIMESTAMP),
+ RESOURCE_ID integer not null references CALENDAR_OBJECT on delete cascade
+);
+
+create index CALENDAR_OBJECT_SPLITTER_WORK_RESOURCE_ID on
+ CALENDAR_OBJECT_SPLITTER_WORK(RESOURCE_ID);
+
+--------------------
+-- Schema Version --
+--------------------
+
+create table CALENDARSERVER (
+ NAME varchar(255) primary key, -- implicit index
+ VALUE varchar(255)
+);
+
+insert into CALENDARSERVER values ('VERSION', '29');
+insert into CALENDARSERVER values ('CALENDAR-DATAVERSION', '5');
+insert into CALENDARSERVER values ('ADDRESSBOOK-DATAVERSION', '2');
+insert into CALENDARSERVER values ('NOTIFICATION-DATAVERSION', '1');
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_29_to_30.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_29_to_30.sql (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/oracle-dialect/upgrade_from_29_to_30.sql 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,53 @@
+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 29 to 30 --
+---------------------------------------------------
+
+-- Home related updates
+
+alter table CALENDAR_HOME
+ add ("STATUS" integer default 0 not null);
+
+alter table NOTIFICATION_HOME
+ add ("STATUS" integer default 0 not null);
+
+alter table ADDRESSBOOK_HOME
+ add ("STATUS" integer default 0 not null);
+
+create table HOME_STATUS (
+ "ID" integer primary key,
+ "DESCRIPTION" nvarchar2(16) unique
+);
+
+insert into HOME_STATUS (DESCRIPTION, ID) values ('normal', 0);
+insert into HOME_STATUS (DESCRIPTION, ID) values ('external', 1);
+
+-- Bind changes
+alter table CALENDAR_BIND
+ add ("EXTERNAL_ID" integer default null);
+
+alter table SHARED_ADDRESSBOOK_BIND
+ add ("EXTERNAL_ID" integer default null);
+
+alter table SHARED_GROUP_BIND
+ add ("EXTERNAL_ID" integer default null);
+
+
+-- Now update the version
+-- No data upgrades
+update CALENDARSERVER set VALUE = '30' where NAME = 'VERSION';
Added: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_29_to_30.sql
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_29_to_30.sql (rev 0)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_schema/upgrades/postgres-dialect/upgrade_from_29_to_30.sql 2013-12-06 21:43:35 UTC (rev 12041)
@@ -0,0 +1,55 @@
+----
+-- Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+----
+
+---------------------------------------------------
+-- Upgrade database schema from VERSION 29 to 30 --
+---------------------------------------------------
+
+-- Home related updates
+
+alter table CALENDAR_HOME
+ add column STATUS integer default 0 not null;
+
+alter table NOTIFICATION_HOME
+ add column STATUS integer default 0 not null;
+
+alter table ADDRESSBOOK_HOME
+ add column STATUS integer default 0 not null;
+
+-- Enumeration of statuses
+
+create table HOME_STATUS (
+ ID integer primary key,
+ DESCRIPTION varchar(16) not null unique
+);
+
+insert into HOME_STATUS values (0, 'normal' );
+insert into HOME_STATUS values (1, 'external');
+
+-- Bind changes
+alter table CALENDAR_BIND
+ add column EXTERNAL_ID integer default null;
+
+alter table SHARED_ADDRESSBOOK_BIND
+ add column EXTERNAL_ID integer default null;
+
+alter table SHARED_GROUP_BIND
+ add column EXTERNAL_ID integer default null;
+
+
+-- Now update the version
+-- No data upgrades
+update CALENDARSERVER set VALUE = '30' where NAME = 'VERSION';
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_tables.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_tables.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/datastore/sql_tables.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -133,6 +133,15 @@
# Various constants
+_homeStatus = _schemaConstants(
+ schema.HOME_STATUS.DESCRIPTION,
+ schema.HOME_STATUS.ID
+)
+
+
+_HOME_STATUS_NORMAL = _homeStatus('normal')
+_HOME_STATUS_EXTERNAL = _homeStatus('external')
+
_bindStatus = _schemaConstants(
schema.CALENDAR_BIND_STATUS.DESCRIPTION,
schema.CALENDAR_BIND_STATUS.ID
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/icommondatastore.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/icommondatastore.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/icommondatastore.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -51,6 +51,13 @@
+class RecordNotAllowedError(CommonStoreError):
+ """
+ User not allowed.
+ """
+
+
+
class NameNotAllowedError(CommonStoreError):
"""
Attempt to create an object with a name that is not allowed.
@@ -205,6 +212,13 @@
"""
+
+class ExternalShareFailed(CommonStoreError):
+ """
+ An external sharee operation failed.
+ """
+
+
# Indexing / sync tokens
Modified: CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/idirectoryservice.py
===================================================================
--- CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/idirectoryservice.py 2013-12-06 20:40:44 UTC (rev 12040)
+++ CalendarServer/branches/users/cdaboo/cross-pod-sharing/txdav/common/idirectoryservice.py 2013-12-06 21:43:35 UTC (rev 12041)
@@ -26,6 +26,20 @@
"IStoreDirectoryRecord",
]
+class IStoreDirectoryError(Exception):
+ """
+ Base class for directory related errors.
+ """
+
+
+
+class DirectoryRecordNotFoundError(Exception):
+ """
+ Directory record not found.
+ """
+
+
+
class IStoreDirectoryService(Interface):
"""
Directory Service for looking up users.
@@ -55,3 +69,27 @@
fullName = Attribute("Full name for the entity associated with the record: C{str}")
displayName = Attribute("Display name for entity associated with the record: C{str}")
+
+ def serverURI(): #@NoSelf
+ """
+ Return the URI for the record's server "pod".
+
+ @return: a URI.
+ @rtype: C{str}
+ """
+
+ def server(): #@NoSelf
+ """
+ Return the L{txdav.caldav.datastore.scheduling.localservers.Server} for the record's server "pod".
+
+ @return: a pod server record.
+ @rtype: L{txdav.caldav.datastore.scheduling.localservers.Server}
+ """
+
+ def thisServer(): #@NoSelf
+ """
+ Indicates whether the record is hosted on this server "pod".
+
+ @return: C{True} if hosted by this service.
+ @rtype: C{bool}
+ """
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/calendarserver-changes/attachments/20140312/587b5dd8/attachment.html>
More information about the calendarserver-changes
mailing list