[CalendarServer-changes] [11650] CalendarServer/branches/users/cdaboo/json
source_changes at macosforge.org
Sat Aug 31 14:14:10 PDT 2013
Revision: 11650
http://trac.calendarserver.org//changeset/11650
Author: cdaboo at apple.com
Date: 2013-08-31 14:14:10 -0700 (Sat, 31 Aug 2013)
Log Message:
-----------
Merge from trunk.
Modified Paths:
--------------
CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py
CalendarServer/branches/users/cdaboo/json/calendarserver/tools/gateway.py
CalendarServer/branches/users/cdaboo/json/calendarserver/tools/util.py
CalendarServer/branches/users/cdaboo/json/conf/caldavd-apple.plist
CalendarServer/branches/users/cdaboo/json/contrib/tools/netstatus.py
CalendarServer/branches/users/cdaboo/json/twext/web2/metafd.py
CalendarServer/branches/users/cdaboo/json/twext/web2/test/test_metafd.py
CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/appleopendirectory.py
CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_buildquery.py
CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py
CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py
CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/inbound.py
CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py
CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/sql.py
CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py
CalendarServer/branches/users/cdaboo/json/txdav/xml/base.py
Property Changed:
----------------
CalendarServer/branches/users/cdaboo/json/
Property changes on: CalendarServer/branches/users/cdaboo/json
___________________________________________________________________
Modified: svn:mergeinfo
- /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11612
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
+ /CalDAVTester/trunk:11193-11198
/CalendarServer/branches/config-separation:4379-4443
/CalendarServer/branches/egg-info-351:4589-4625
/CalendarServer/branches/generic-sqlstore:6167-6191
/CalendarServer/branches/new-store:5594-5934
/CalendarServer/branches/new-store-no-caldavfile:5911-5935
/CalendarServer/branches/new-store-no-caldavfile-2:5936-5981
/CalendarServer/branches/release/CalendarServer-4.3-dev:10180-10190,10192
/CalendarServer/branches/users/cdaboo/batchupload-6699:6700-7198
/CalendarServer/branches/users/cdaboo/cached-subscription-calendars-5692:5693-5702
/CalendarServer/branches/users/cdaboo/component-set-fixes:8130-8346
/CalendarServer/branches/users/cdaboo/directory-cache-on-demand-3627:3628-3644
/CalendarServer/branches/users/cdaboo/fix-no-ischedule:11612
/CalendarServer/branches/users/cdaboo/implicituidrace:8137-8141
/CalendarServer/branches/users/cdaboo/ischedule-dkim:9747-9979
/CalendarServer/branches/users/cdaboo/managed-attachments:9985-10145
/CalendarServer/branches/users/cdaboo/more-sharing-5591:5592-5601
/CalendarServer/branches/users/cdaboo/partition-4464:4465-4957
/CalendarServer/branches/users/cdaboo/pods:7297-7377
/CalendarServer/branches/users/cdaboo/pycalendar:7085-7206
/CalendarServer/branches/users/cdaboo/pycard:7227-7237
/CalendarServer/branches/users/cdaboo/queued-attendee-refreshes:7740-8287
/CalendarServer/branches/users/cdaboo/relative-config-paths-5070:5071-5105
/CalendarServer/branches/users/cdaboo/shared-calendars-5187:5188-5440
/CalendarServer/branches/users/cdaboo/store-scheduling:10876-11129
/CalendarServer/branches/users/cdaboo/timezones:7443-7699
/CalendarServer/branches/users/cdaboo/txn-debugging:8730-8743
/CalendarServer/branches/users/gaya/sharedgroups-3:11088-11204
/CalendarServer/branches/users/glyph/always-abort-txn-on-error:9958-9969
/CalendarServer/branches/users/glyph/case-insensitive-uid:8772-8805
/CalendarServer/branches/users/glyph/conn-limit:6574-6577
/CalendarServer/branches/users/glyph/contacts-server-merge:4971-5080
/CalendarServer/branches/users/glyph/dalify:6932-7023
/CalendarServer/branches/users/glyph/db-reconnect:6824-6876
/CalendarServer/branches/users/glyph/deploybuild:7563-7572
/CalendarServer/branches/users/glyph/digest-auth-redux:10624-10635
/CalendarServer/branches/users/glyph/disable-quota:7718-7727
/CalendarServer/branches/users/glyph/dont-start-postgres:6592-6614
/CalendarServer/branches/users/glyph/enforce-max-requests:11640-11643
/CalendarServer/branches/users/glyph/hang-fix:11465-11491
/CalendarServer/branches/users/glyph/imip-and-admin-html:7866-7984
/CalendarServer/branches/users/glyph/ipv6-client:9054-9105
/CalendarServer/branches/users/glyph/launchd-wrapper-bis:11413-11436
/CalendarServer/branches/users/glyph/linux-tests:6893-6900
/CalendarServer/branches/users/glyph/migrate-merge:8690-8713
/CalendarServer/branches/users/glyph/misc-portability-fixes:7365-7374
/CalendarServer/branches/users/glyph/more-deferreds-6:6322-6368
/CalendarServer/branches/users/glyph/more-deferreds-7:6369-6445
/CalendarServer/branches/users/glyph/multiget-delete:8321-8330
/CalendarServer/branches/users/glyph/new-export:7444-7485
/CalendarServer/branches/users/glyph/one-home-list-api:10048-10073
/CalendarServer/branches/users/glyph/oracle:7106-7155
/CalendarServer/branches/users/glyph/oracle-nulls:7340-7351
/CalendarServer/branches/users/glyph/other-html:8062-8091
/CalendarServer/branches/users/glyph/parallel-sim:8240-8251
/CalendarServer/branches/users/glyph/parallel-upgrade:8376-8400
/CalendarServer/branches/users/glyph/parallel-upgrade_to_1:8571-8583
/CalendarServer/branches/users/glyph/q:9560-9688
/CalendarServer/branches/users/glyph/queue-locking-and-timing:10204-10289
/CalendarServer/branches/users/glyph/quota:7604-7637
/CalendarServer/branches/users/glyph/sendfdport:5388-5424
/CalendarServer/branches/users/glyph/shared-pool-fixes:8436-8443
/CalendarServer/branches/users/glyph/shared-pool-take2:8155-8174
/CalendarServer/branches/users/glyph/sharedpool:6490-6550
/CalendarServer/branches/users/glyph/sharing-api:9192-9205
/CalendarServer/branches/users/glyph/skip-lonely-vtimezones:8524-8535
/CalendarServer/branches/users/glyph/sql-store:5929-6073
/CalendarServer/branches/users/glyph/start-service-start-loop:11060-11065
/CalendarServer/branches/users/glyph/subtransactions:7248-7258
/CalendarServer/branches/users/glyph/table-alias:8651-8664
/CalendarServer/branches/users/glyph/uidexport:7673-7676
/CalendarServer/branches/users/glyph/unshare-when-access-revoked:10562-10595
/CalendarServer/branches/users/glyph/use-system-twisted:5084-5149
/CalendarServer/branches/users/glyph/uuid-normalize:9268-9296
/CalendarServer/branches/users/glyph/warning-cleanups:11347-11357
/CalendarServer/branches/users/glyph/xattrs-from-files:7757-7769
/CalendarServer/branches/users/sagen/applepush:8126-8184
/CalendarServer/branches/users/sagen/inboxitems:7380-7381
/CalendarServer/branches/users/sagen/locations-resources:5032-5051
/CalendarServer/branches/users/sagen/locations-resources-2:5052-5061
/CalendarServer/branches/users/sagen/purge_old_events:6735-6746
/CalendarServer/branches/users/sagen/resource-delegates-4038:4040-4067
/CalendarServer/branches/users/sagen/resource-delegates-4066:4068-4075
/CalendarServer/branches/users/sagen/resources-2:5084-5093
/CalendarServer/branches/users/sagen/testing:10827-10851,10853-10855
/CalendarServer/branches/users/wsanchez/transations:5515-5593
/CalendarServer/trunk:11622-11649
Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tap/caldav.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -63,8 +63,9 @@
from twext.enterprise.ienterprise import POSTGRES_DIALECT
from twext.enterprise.ienterprise import ORACLE_DIALECT
from twext.enterprise.adbapi2 import ConnectionPool
+from twext.enterprise.queue import NonPerformingQueuer
+from twext.enterprise.queue import PeerConnectionPool
from twext.enterprise.queue import WorkerFactory as QueueWorkerFactory
-from twext.enterprise.queue import PeerConnectionPool
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.upgrade.sql.upgrade import (
@@ -225,14 +226,31 @@
""" Registers a rotating file logger for error logging, if
config.ErrorLogEnabled is True. """
+ def __init__(self, logEnabled, logPath, logRotateLength, logMaxFiles):
+ """
+ @param logEnabled: Whether to write to a log file
+ @type logEnabled: C{boolean}
+ @param logPath: the full path to the log file
+ @type logPath: C{str}
+ @param logRotateLength: rotate when files exceed this many bytes
+ @type logRotateLength: C{int}
+ @param logMaxFiles: keep at most this many files
+ @type logMaxFiles: C{int}
+ """
+ MultiService.__init__(self)
+ self.logEnabled = logEnabled
+ self.logPath = logPath
+ self.logRotateLength = logRotateLength
+ self.logMaxFiles = logMaxFiles
+
def setServiceParent(self, app):
MultiService.setServiceParent(self, app)
- if config.ErrorLogEnabled:
+ if self.logEnabled:
errorLogFile = LogFile.fromFullPath(
- config.ErrorLogFile,
- rotateLength=config.ErrorLogRotateMB * 1024 * 1024,
- maxRotatedFiles=config.ErrorLogMaxRotatedFiles
+ self.logPath,
+ rotateLength = self.logRotateLength,
+ maxRotatedFiles = self.logMaxFiles
)
errorLogObserver = FileLogObserver(errorLogFile).emit
@@ -251,7 +269,9 @@
def __init__(self, logObserver):
self.logObserver = logObserver # accesslog observer
- MultiService.__init__(self)
+ ErrorLoggingMultiService.__init__(self, config.ErrorLogEnabled,
+ config.ErrorLogFile, config.ErrorLogRotateMB * 1024 * 1024,
+ config.ErrorLogMaxRotatedFiles)
def privilegedStartService(self):
@@ -1247,8 +1267,9 @@
Create an agent service which listens for configuration requests
"""
- # Don't use memcached -- calendar server might take it away at any
- # moment
+ # Don't use memcached initially -- calendar server might take it away at
+ # any moment. However, when we run a command through the gateway, it
+ # will conditionally set ClientEnabled at that time.
def agentPostUpdateHook(configDict, reloading=False):
configDict.Memcached.Pools.Default.ClientEnabled = False
@@ -1266,10 +1287,20 @@
dataStoreWatcher = DirectoryChangeListener(reactor,
config.DataRoot, DataStoreMonitor(reactor, storageService))
dataStoreWatcher.startListening()
+ if store is not None:
+ store.queuer = NonPerformingQueuer()
return makeAgentService(store)
uid, gid = getSystemIDs(config.UserName, config.GroupName)
- return self.storageService(agentServiceCreator, None, uid=uid, gid=gid)
+ svc = self.storageService(agentServiceCreator, None, uid=uid, gid=gid)
+ agentLoggingService = ErrorLoggingMultiService(
+ config.ErrorLogEnabled,
+ config.AgentLogFile,
+ config.ErrorLogRotateMB * 1024 * 1024,
+ config.ErrorLogMaxRotatedFiles
+ )
+ svc.setServiceParent(agentLoggingService)
+ return agentLoggingService
def storageService(self, createMainService, logObserver, uid=None, gid=None):
@@ -1428,7 +1459,12 @@
Create a master service to coordinate a multi-process configuration,
spawning subprocesses that use L{makeService_Slave} to perform work.
"""
- s = ErrorLoggingMultiService()
+ s = ErrorLoggingMultiService(
+ config.ErrorLogEnabled,
+ config.ErrorLogFile,
+ config.ErrorLogRotateMB * 1024 * 1024,
+ config.ErrorLogMaxRotatedFiles
+ )
# Add a service to re-exec the master when it receives SIGHUP
ReExecService(config.PIDFile).setServiceParent(s)
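For context on the refactoring above: ErrorLoggingMultiService now receives its logging settings as constructor arguments instead of reading the global config at start-up, which is what lets the new agent service reuse the class with config.AgentLogFile. A minimal sketch of the pattern follows; the class name ParameterizedErrorLogService is illustrative, not from the commit, but MultiService, LogFile, and FileLogObserver are the Twisted APIs used above.

    from twisted.application.service import MultiService
    from twisted.python.log import FileLogObserver, addObserver
    from twisted.python.logfile import LogFile

    class ParameterizedErrorLogService(MultiService):
        """
        Sketch: all logging settings are injected, so one class can back
        the master error log and the agent log with different paths.
        """
        def __init__(self, logEnabled, logPath, logRotateLength, logMaxFiles):
            MultiService.__init__(self)
            self.logEnabled = logEnabled
            self.logPath = logPath
            self.logRotateLength = logRotateLength
            self.logMaxFiles = logMaxFiles

        def setServiceParent(self, app):
            MultiService.setServiceParent(self, app)
            if self.logEnabled:
                # Rotating file log, sized and retained per the arguments.
                logFile = LogFile.fromFullPath(
                    self.logPath,
                    rotateLength=self.logRotateLength,
                    maxRotatedFiles=self.logMaxFiles,
                )
                addObserver(FileLogObserver(logFile).emit)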
Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/gateway.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/gateway.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/gateway.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -30,7 +30,7 @@
from calendarserver.tools.util import (
principalForPrincipalID, proxySubprincipal, addProxy, removeProxy,
- ProxyError, ProxyWarning
+ ProxyError, ProxyWarning, autoDisableMemcached
)
from calendarserver.tools.principals import getProxies, setProxies, updateRecord
from calendarserver.tools.purge import WorkerService, PurgeOldEventsService, DEFAULT_BATCH_SIZE, DEFAULT_RETAIN_DAYS
@@ -188,6 +188,22 @@
@inlineCallbacks
def run(self):
+
+ # This method can be called as the result of an agent request. We
+ # check to see if memcached is there for each call because the server
+ # could have stopped/started since the last time.
+
+ for pool in config.Memcached.Pools.itervalues():
+ pool.ClientEnabled = True
+ autoDisableMemcached(config)
+
+ from twistedcaldav.directory import calendaruserproxy
+ if calendaruserproxy.ProxyDBService is not None:
+ # Reset the proxy db memcacher because memcached may have come or
+ # gone since the last time through here.
+ # TODO: figure out a better way to do this
+ calendaruserproxy.ProxyDBService._memcacher._memcacheProtocol = None
+
try:
for command in self.commands:
commandName = command['command']
Modified: CalendarServer/branches/users/cdaboo/json/calendarserver/tools/util.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/calendarserver/tools/util.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/calendarserver/tools/util.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -235,23 +235,21 @@
def autoDisableMemcached(config):
"""
- If memcached is not running, set config.Memcached.ClientEnabled to False
+ Set ClientEnabled to False for each pool whose memcached is not running
"""
- if not config.Memcached.Pools.Default.ClientEnabled:
- return
+ for pool in config.Memcached.Pools.itervalues():
+ if pool.ClientEnabled:
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((pool.BindAddress, pool.Port))
+ s.close()
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ except socket.error:
+ pool.ClientEnabled = False
- try:
- s.connect((config.Memcached.Pools.Default.BindAddress, config.Memcached.Pools.Default.Port))
- s.close()
- except socket.error:
- config.Memcached.Pools.Default.ClientEnabled = False
-
-
def setupMemcached(config):
#
# Connect to memcached
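Aside: the reworked autoDisableMemcached() above probes every configured pool with a plain TCP connect and disables only the pools that fail, rather than checking the Default pool alone. A standalone sketch of that probe (the helper name poolIsReachable and the timeout are illustrative additions, not part of the commit):

    import socket

    def poolIsReachable(bindAddress, port, timeout=1.0):
        # Attempt a TCP connection to the pool's memcached; treat any
        # socket error (refused, unreachable, timed out) as "not running".
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout)
        try:
            s.connect((bindAddress, port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    # The patched loop then amounts to:
    # for pool in config.Memcached.Pools.itervalues():
    #     if pool.ClientEnabled and not poolIsReachable(pool.BindAddress, pool.Port):
    #         pool.ClientEnabled = False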
Modified: CalendarServer/branches/users/cdaboo/json/conf/caldavd-apple.plist
===================================================================
--- CalendarServer/branches/users/cdaboo/json/conf/caldavd-apple.plist 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/conf/caldavd-apple.plist 2013-08-31 21:14:10 UTC (rev 11650)
@@ -111,11 +111,18 @@
<string>-c log_lock_waits=TRUE</string>
<string>-c deadlock_timeout=10</string>
<string>-c log_line_prefix='%m [%p] '</string>
+ <string>-c logging_collector=on</string>
+ <string>-c log_truncate_on_rotation=on</string>
+ <string>-c log_directory=/var/log/caldavd/postgresql</string>
+ <string>-c log_filename=postgresql_%w.log</string>
+ <string>-c log_rotation_age=1440</string>
</array>
<key>ExtraConnections</key>
<integer>20</integer>
<key>ClusterName</key>
<string>cluster.pg</string>
+ <key>LogFile</key>
+ <string>xpg_ctl.log</string>
</dict>
<!-- Data root -->
Modified: CalendarServer/branches/users/cdaboo/json/contrib/tools/netstatus.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/contrib/tools/netstatus.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/contrib/tools/netstatus.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -52,7 +52,7 @@
splits = line.split()
if splits[0] not in ("tcp4", "tcp6", "tcp"):
continue
- if not splits[3].endswith("8443") and not splits[3].endswith("8008"):
+ if not splits[3].endswith(":8443") and not splits[3].endswith(":8008"):
continue
for ctr, items in enumerate(stateNames):
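The one-character netstatus.py fix matters because a bare suffix match also catches unrelated ports. Assuming colon-separated local addresses, as the patched check does:

    addr_ok = "10.0.1.2:8443"
    addr_other = "10.0.1.2:18443"
    assert addr_ok.endswith("8443") and addr_other.endswith("8443")        # old test: false positive
    assert addr_ok.endswith(":8443") and not addr_other.endswith(":8443")  # fixed test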
Modified: CalendarServer/branches/users/cdaboo/json/twext/web2/metafd.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/web2/metafd.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/twext/web2/metafd.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -30,6 +30,7 @@
from twext.web2.channel.http import HTTPFactory
from twisted.application.service import MultiService, Service
from twisted.internet import reactor
+from twisted.python.util import FancyStrMixin
from twisted.internet.tcp import Server
log = Logger()
@@ -161,12 +162,15 @@
@total_ordering
-class WorkerStatus(object):
+class WorkerStatus(FancyStrMixin, object):
"""
The status of a worker process.
"""
- def __init__(self, acknowledged=0, unacknowledged=0, started=0):
+ showAttributes = "acknowledged unacknowledged started abandoned".split()
+
+ def __init__(self, acknowledged=0, unacknowledged=0, started=0,
+ abandoned=0):
"""
Create a L{ConnectionStatus} with a number of sent connections and a
number of un-acknowledged connections.
@@ -179,23 +183,36 @@
the subprocess which have never received a status response (a
"C{+}" status message).
+ @param abandoned: The number of connections which have been sent to
+ this worker, but were not acknowledged at the moment that the
+ worker restarted.
+
@param started: The number of times this worker has been started.
"""
self.acknowledged = acknowledged
self.unacknowledged = unacknowledged
self.started = started
+ self.abandoned = abandoned
+ def effective(self):
+ """
+ The current effective load.
+ """
+ return self.acknowledged + self.unacknowledged
+
+
def restarted(self):
"""
The L{WorkerStatus} derived from the current status of a process and
the fact that it just restarted.
"""
- return self.__class__(0, self.unacknowledged, self.started + 1)
+ return self.__class__(0, 0, self.started + 1, self.unacknowledged)
def _tuplify(self):
- return (self.acknowledged, self.unacknowledged, self.started)
+ return (self.acknowledged, self.unacknowledged, self.started,
+ self.abandoned)
def __lt__(self, other):
@@ -215,7 +232,8 @@
return NotImplemented
return self.__class__(self.acknowledged + other.acknowledged,
self.unacknowledged + other.unacknowledged,
- self.started + other.started)
+ self.started + other.started,
+ self.abandoned + other.abandoned)
def __sub__(self, other):
@@ -223,7 +241,8 @@
return NotImplemented
return self + self.__class__(-other.acknowledged,
-other.unacknowledged,
- -other.started)
+ -other.started,
+ -other.abandoned)
@@ -320,7 +339,7 @@
C{self.dispatcher.statuses} attribute, which is what
C{self.outstandingRequests} uses to compute it.)
"""
- current = sum(status.acknowledged
+ current = sum(status.effective()
for status in self.dispatcher.statuses)
self._outstandingRequests = current # preserve for or= field in log
maximum = self.maxRequests
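Two behavioural points in the metafd.py change are easy to miss: effective() counts dispatched-but-unacknowledged connections against the limit, and restarted() converts a worker's unacknowledged connections into "abandoned" rather than carrying them forward. A stripped-down model (MiniWorkerStatus is illustrative; FancyStrMixin is the real Twisted mixin imported above):

    from twisted.python.util import FancyStrMixin

    class MiniWorkerStatus(FancyStrMixin, object):
        showAttributes = ("acknowledged", "unacknowledged", "started", "abandoned")

        def __init__(self, acknowledged=0, unacknowledged=0, started=0, abandoned=0):
            self.acknowledged = acknowledged
            self.unacknowledged = unacknowledged
            self.started = started
            self.abandoned = abandoned

        def effective(self):
            # Current load: unacknowledged connections still occupy a slot.
            return self.acknowledged + self.unacknowledged

        def restarted(self):
            # A restart zeroes both counters; in-flight connections are lost.
            return self.__class__(0, 0, self.started + 1, self.unacknowledged)

    status = MiniWorkerStatus(acknowledged=1, unacknowledged=2, started=3)
    print(status.restarted())
    # <MiniWorkerStatus acknowledged=0 unacknowledged=0 started=4 abandoned=2>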
Modified: CalendarServer/branches/users/cdaboo/json/twext/web2/test/test_metafd.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twext/web2/test/test_metafd.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/twext/web2/test/test_metafd.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -30,6 +30,7 @@
from twisted.application.service import Service
from twext.internet.test.test_sendfdport import ReaderAdder
+from twext.web2.metafd import WorkerStatus
from twisted.trial.unittest import TestCase
@@ -180,7 +181,29 @@
self.assertEquals(builder.port.reading, True)
+ def test_processStopsReadingEvenWhenConnectionsAreNotAcknowledged(self):
+ """
+ L{ConnectionLimiter.statusesChanged} determines whether the current
+ number of outstanding requests is above the limit.
+ """
+ builder = LimiterBuilder(self)
+ builder.fillUp(acknowledged=False)
+ self.assertEquals(builder.port.reading, False)
+ builder.processRestart()
+ self.assertEquals(builder.port.reading, True)
+
+ def test_workerStatusRepr(self):
+ """
+ L{WorkerStatus.__repr__} will show all the values associated with the
+ status of the worker.
+ """
+ self.assertEquals(repr(WorkerStatus(1, 2, 3, 4)),
+ "<WorkerStatus acknowledged=1 unacknowledged=2 "
+ "started=3 abandoned=4>")
+
+
+
class LimiterBuilder(object):
"""
A L{LimiterBuilder} can build a L{ConnectionLimiter} and associated objects
@@ -220,15 +243,20 @@
return serverServiceMaker
- def fillUp(self):
+ def fillUp(self, acknowledged=True):
"""
Fill up all the slots on the connection limiter.
+
+ @param acknowledged: Should the virtual connections created by this
+ method send a message back to the dispatcher indicating that the
+ subprocess has acknowledged receipt of the file descriptor?
"""
for x in range(self.limiter.maxRequests):
self.dispatcher.sendFileDescriptor(None, "SSL")
- self.dispatcher.statusMessage(
- self.dispatcher._subprocessSockets[0], "+"
- )
+ if acknowledged:
+ self.dispatcher.statusMessage(
+ self.dispatcher._subprocessSockets[0], "+"
+ )
def processRestart(self):
Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/appleopendirectory.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/appleopendirectory.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/appleopendirectory.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -1378,7 +1378,8 @@
def buildNestedQueryFromTokens(tokens, mapping):
"""
Build a DS query expression such that all the tokens must appear in either
- the fullName (anywhere) or emailAddresses (at the beginning).
+ the fullName (anywhere), emailAddresses (at the beginning) or record name
+ (at the beginning).
@param tokens: The tokens to search on
@type tokens: C{list} of C{str}
@@ -1394,6 +1395,7 @@
fields = [
("fullName", dsattributes.eDSContains),
("emailAddresses", dsattributes.eDSStartsWith),
+ ("recordName", dsattributes.eDSStartsWith),
]
outer = []
Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_buildquery.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_buildquery.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/directory/test/test_buildquery.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -140,17 +140,17 @@
query = buildNestedQueryFromTokens(["foo"], OpenDirectoryService._ODFields)
self.assertEquals(
query.generate(),
- "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*))"
+ "(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*)(dsAttrTypeStandard:RecordName=foo*))"
)
query = buildNestedQueryFromTokens(["foo", "bar"], OpenDirectoryService._ODFields)
self.assertEquals(
query.generate(),
- "(&(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*))(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*)))"
+ "(&(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*)(dsAttrTypeStandard:RecordName=foo*))(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*)(dsAttrTypeStandard:RecordName=bar*)))"
)
query = buildNestedQueryFromTokens(["foo", "bar", "baz"], OpenDirectoryService._ODFields)
self.assertEquals(
query.generate(),
- "(&(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*))(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*))(|(dsAttrTypeStandard:RealName=*baz*)(dsAttrTypeStandard:EMailAddress=baz*)))"
+ "(&(|(dsAttrTypeStandard:RealName=*foo*)(dsAttrTypeStandard:EMailAddress=foo*)(dsAttrTypeStandard:RecordName=foo*))(|(dsAttrTypeStandard:RealName=*bar*)(dsAttrTypeStandard:EMailAddress=bar*)(dsAttrTypeStandard:RecordName=bar*))(|(dsAttrTypeStandard:RealName=*baz*)(dsAttrTypeStandard:EMailAddress=baz*)(dsAttrTypeStandard:RecordName=baz*)))"
)
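The updated expectations encode the query shape: one OR group per token (RealName anywhere, EMailAddress and now RecordName at the beginning), ANDed together when there is more than one token. A pure-string sketch of that shape (sketchNestedQuery is illustrative; the real code builds dsquery expressions, not strings):

    def sketchNestedQuery(tokens):
        groups = [
            "(|(dsAttrTypeStandard:RealName=*%s*)"
            "(dsAttrTypeStandard:EMailAddress=%s*)"
            "(dsAttrTypeStandard:RecordName=%s*))" % (t, t, t)
            for t in tokens
        ]
        # A single group stands alone; multiple groups are ANDed.
        return groups[0] if len(groups) == 1 else "(&%s)" % "".join(groups)

    assert sketchNestedQuery(["foo"]) == (
        "(|(dsAttrTypeStandard:RealName=*foo*)"
        "(dsAttrTypeStandard:EMailAddress=foo*)"
        "(dsAttrTypeStandard:RecordName=foo*))"
    )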
Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/resource.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -325,7 +325,7 @@
@param transaction: optional transaction to use instead of associated transaction
@type transaction: L{txdav.caldav.idav.ITransaction}
"""
- result = yield super(CalDAVResource, self).renderHTTP(request)
+ response = yield super(CalDAVResource, self).renderHTTP(request)
if transaction is None:
transaction = self._associatedTransaction
if transaction is not None:
@@ -333,9 +333,13 @@
yield transaction.abort()
else:
yield transaction.commit()
- returnValue(result)
+ # May need to reset the last-modified header in the response as txn.commit() can change it due to pre-commit hooks
+ if response.headers.hasHeader("last-modified"):
+ response.headers.setHeader("last-modified", self.lastModified())
+ returnValue(response)
+
# Begin transitional new-store resource interface:
def copyDeadPropertiesTo(self, other):
Modified: CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/twistedcaldav/stdconfig.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -449,6 +449,7 @@
#
"AccessLogFile" : "access.log", # Apache-style access log
"ErrorLogFile" : "error.log", # Server activity log
+ "AgentLogFile" : "agent.log", # Agent activity log
"ErrorLogEnabled" : True, # True = use log file, False = stdout
"ErrorLogRotateMB" : 10, # Rotate error log after so many megabytes
"ErrorLogMaxRotatedFiles" : 5, # Retain this many error log files
@@ -1085,6 +1086,7 @@
("ConfigRoot", ("Scheduling", "iSchedule", "DKIM", "PrivateExchanges",)),
("LogRoot", "AccessLogFile"),
("LogRoot", "ErrorLogFile"),
+ ("LogRoot", "AgentLogFile"),
("LogRoot", ("Postgres", "LogFile",)),
("LogRoot", ("LogDatabase", "StatisticsLogFile",)),
("LogRoot", "AccountingLogRoot"),
Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/inbound.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/inbound.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/inbound.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -49,11 +49,11 @@
# specifically, "Unhandled unsolicited response" nonsense.
#
class IMAPLogger(LegacyLogger):
- def emit(self, level, message=None, **kwargs):
- if message is not None and message.startswith("Unhandled unsolicited response:"):
+ def msg(self, *message, **kwargs):
+ if message and message[0].startswith("Unhandled unsolicited response:"):
return
- super(IMAPLogger, self).emit(self, level, message, **kwargs)
+ super(IMAPLogger, self).msg(*message, **kwargs)
imap4.log = IMAPLogger()
@@ -112,6 +112,11 @@
from twisted.internet import reactor
self.reactor = reactor
+ # If we're using our dedicated account on our local server, we're free
+ # to delete all messages that arrive in the inbox so as to not let
+ # cruft build up
+ self.deleteAllMail = shouldDeleteAllMail(config.ServerHostName,
+ settings.Server, settings.Username)
self.mailReceiver = MailReceiver(store, directory)
mailType = settings['Type']
if mailType.lower().startswith('pop'):
@@ -127,7 +132,8 @@
def fetchMail(self):
- return self.point.connect(self.factory(self.settings, self.mailReceiver))
+ return self.point.connect(self.factory(self.settings, self.mailReceiver,
+ self.deleteAllMail))
@inlineCallbacks
@@ -137,7 +143,28 @@
yield scheduleNextMailPoll(self.store, seconds)
+def shouldDeleteAllMail(serverHostName, inboundServer, username):
+ """
+ Given the hostname of the calendar server, the hostname of the pop/imap
+ server, and the username we're using to access inbound mail, determine
+ whether we should delete all messages in the inbox or whether to leave
+ all unprocessed messages.
+ @param serverHostName: the calendar server hostname (config.ServerHostName)
+ @type serverHostName: C{str}
+ @param inboundServer: the pop/imap server hostname
+ @type inboundServer: C{str}
+ @param username: the name of the account we're using to retrieve mail
+ @type username: C{str}
+ @return: True if we should delete all messages from the inbox, False otherwise
+ @rtype: C{boolean}
+ """
+ return (
+ inboundServer in (serverHostName, "localhost") and
+ username == "com.apple.calendarserver"
+ )
+
+
@inlineCallbacks
def scheduleNextMailPoll(store, seconds):
txn = store.newTransaction()
@@ -156,8 +183,9 @@
NO_ORGANIZER_ADDRESS = 3
REPLY_FORWARDED_TO_ORGANIZER = 4
INJECTION_SUBMITTED = 5
+ INCOMPLETE_DSN = 6
+ UNKNOWN_FAILURE = 7
- # What about purge( ) and lowercase( )
def __init__(self, store, directory):
self.store = store
self.directory = directory
@@ -363,7 +391,23 @@
# returns a deferred
def inbound(self, message):
+ """
+ Given the text of an incoming message, parse and process it.
+ The possible return values are:
+ NO_TOKEN - there was no token in the To address
+ UNKNOWN_TOKEN - there was an unknown token in the To address
+ MALFORMED_TO_ADDRESS - we could not parse the To address at all
+ NO_ORGANIZER_ADDRESS - no ics attachment and no email to forward to
+ REPLY_FORWARDED_TO_ORGANIZER - no ics attachment, but reply forwarded
+ INJECTION_SUBMITTED - looks ok, was submitted as a work item
+ INCOMPLETE_DSN - not enough in the DSN to go on
+ UNKNOWN_FAILURE - any error we aren't specifically catching
+
+ @param message: The body of the email
+ @type message: C{str}
+ @return: Deferred firing with one of the above action codes
+ """
try:
msg = email.message_from_string(message)
@@ -376,7 +420,7 @@
# It's a DSN without enough to go on
log.error("Mail gateway can't process DSN %s"
% (msg['Message-ID'],))
- return succeed(None)
+ return succeed(self.INCOMPLETE_DSN)
log.info("Mail gateway received message %s from %s to %s" %
(msg['Message-ID'], msg['From'], msg['To']))
@@ -386,7 +430,7 @@
except Exception, e:
# Don't let a failure of any kind stop us
log.error("Failed to process message: %s" % (e,))
- return succeed(None)
+ return succeed(self.UNKNOWN_FAILURE)
@@ -442,13 +486,22 @@
return defer.DeferredList(downloads).addCallback(self.cbFinished)
+ @inlineCallbacks
def cbDownloaded(self, lines, id):
self.log.debug("POP downloaded message %d" % (id,))
- self.factory.handleMessage("\r\n".join(lines))
- self.log.debug("POP deleting message %d" % (id,))
- self.delete(id)
+ actionTaken = (yield self.factory.handleMessage("\r\n".join(lines)))
+ if self.factory.deleteAllMail:
+ # Delete all mail we see
+ self.log.debug("POP deleting message %d" % (id,))
+ self.delete(id)
+ else:
+ # Delete only mail we've processed
+ if actionTaken == MailReceiver.INJECTION_SUBMITTED:
+ self.log.debug("POP deleting message %d" % (id,))
+ self.delete(id)
+
def cbFinished(self, results):
self.log.debug("POP finished")
return self.quit()
@@ -460,8 +513,10 @@
protocol = POP3DownloadProtocol
- def __init__(self, settings, mailReceiver):
+ def __init__(self, settings, mailReceiver, deleteAllMail):
+ self.settings = settings
self.mailReceiver = mailReceiver
+ self.deleteAllMail = deleteAllMail
self.noisy = False
@@ -477,7 +532,7 @@
def handleMessage(self, message):
self.log.debug("POP factory handle message")
- self.log.debug(message)
+ # self.log.debug(message)
return self.mailReceiver.inbound(message)
@@ -498,12 +553,12 @@
def ebLogError(self, error):
- self.log.error("IMAP Error: %s" % (error,))
+ self.log.error("IMAP Error: {err}", err=error)
def ebAuthenticateFailed(self, reason):
- self.log.debug("IMAP authenticate failed for %s, trying login" %
- (self.factory.settings["Username"],))
+ self.log.debug("IMAP authenticate failed for {name}, trying login",
+ name=self.factory.settings["Username"])
return self.login(self.factory.settings["Username"],
self.factory.settings["Password"]
).addCallback(self.cbLoggedIn
@@ -511,27 +566,34 @@
def ebLoginFailed(self, reason):
- self.log.error("IMAP login failed for %s" %
- (self.factory.settings["Username"],))
+ self.log.error("IMAP login failed for {name}", name=self.factory.settings["Username"])
self.transport.loseConnection()
def cbLoggedIn(self, result):
- self.log.debug("IMAP logged in [%s]" % (self.state,))
+ self.log.debug("IMAP logged in")
self.select("Inbox").addCallback(self.cbInboxSelected)
def cbInboxSelected(self, result):
- self.log.debug("IMAP Inbox selected [%s]" % (self.state,))
- allMessages = imap4.MessageSet(1, None)
- self.fetchUID(allMessages, True).addCallback(self.cbGotUIDs)
+ self.log.debug("IMAP Inbox selected")
+ self.search(imap4.Query(unseen=True)).addCallback(self.cbGotSearch)
+ def cbGotSearch(self, results):
+ if results:
+ ms = imap4.MessageSet()
+ for n in results:
+ ms.add(n)
+ self.fetchUID(ms).addCallback(self.cbGotUIDs)
+ else:
+ self.cbClosed(None)
+
+
def cbGotUIDs(self, results):
- self.log.debug("IMAP got uids [%s]" % (self.state,))
self.messageUIDs = [result['UID'] for result in results.values()]
self.messageCount = len(self.messageUIDs)
- self.log.debug("IMAP Inbox has %d messages" % (self.messageCount,))
+ self.log.debug("IMAP Inbox has {count} unseen messages", count=self.messageCount)
if self.messageCount:
self.fetchNextMessage()
else:
@@ -540,7 +602,7 @@
def fetchNextMessage(self):
- self.log.debug("IMAP in fetchnextmessage [%s]" % (self.state,))
+ # self.log.debug("IMAP in fetchnextmessage")
if self.messageUIDs:
nextUID = self.messageUIDs.pop(0)
messageListToFetch = imap4.MessageSet(nextUID)
@@ -556,8 +618,9 @@
self.expunge().addCallback(self.cbInboxSelected)
+ @inlineCallbacks
def cbGotMessage(self, results, messageList):
- self.log.debug("IMAP in cbGotMessage [%s]" % (self.state,))
+ self.log.debug("IMAP in cbGotMessage")
try:
messageData = results.values()[0]['RFC822']
except IndexError:
@@ -567,44 +630,46 @@
self.fetchNextMessage()
return
- d = self.factory.handleMessage(messageData)
- if isinstance(d, defer.Deferred):
- d.addCallback(self.cbFlagDeleted, messageList)
+ actionTaken = (yield self.factory.handleMessage(messageData))
+ if self.factory.deleteAllMail:
+ # Delete all mail we see
+ yield self.cbFlagDeleted(messageList)
else:
- # No deferred returned, so no need for addCallback( )
- self.cbFlagDeleted(None, messageList)
+ # Delete only mail we've processed; the rest are left flagged Seen
+ if actionTaken == MailReceiver.INJECTION_SUBMITTED:
+ yield self.cbFlagDeleted(messageList)
+ else:
+ self.fetchNextMessage()
- def cbFlagDeleted(self, results, messageList):
+ def cbFlagDeleted(self, messageList):
self.addFlags(messageList, ("\\Deleted",),
uid=True).addCallback(self.cbMessageDeleted, messageList)
def cbMessageDeleted(self, results, messageList):
- self.log.debug("IMAP in cbMessageDeleted [%s]" % (self.state,))
self.log.debug("Deleted message")
self.fetchNextMessage()
def cbClosed(self, results):
- self.log.debug("IMAP in cbClosed [%s]" % (self.state,))
self.log.debug("Mailbox closed")
self.logout().addCallback(
lambda _: self.transport.loseConnection())
def rawDataReceived(self, data):
- self.log.debug("RAW RECEIVED: %s" % (data,))
+ # self.log.debug("RAW RECEIVED: {data}", data=data)
imap4.IMAP4Client.rawDataReceived(self, data)
def lineReceived(self, line):
- self.log.debug("RECEIVED: %s" % (line,))
+ # self.log.debug("RECEIVED: {line}", line=line)
imap4.IMAP4Client.lineReceived(self, line)
def sendLine(self, line):
- self.log.debug("SENDING: %s" % (line,))
+ # self.log.debug("SENDING: {line}", line=line)
imap4.IMAP4Client.sendLine(self, line)
@@ -614,11 +679,12 @@
protocol = IMAP4DownloadProtocol
- def __init__(self, settings, mailReceiver):
+ def __init__(self, settings, mailReceiver, deleteAllMail):
self.log.debug("Setting up IMAPFactory")
self.settings = settings
self.mailReceiver = mailReceiver
+ self.deleteAllMail = deleteAllMail
self.noisy = False
@@ -633,7 +699,7 @@
def handleMessage(self, message):
self.log.debug("IMAP factory handle message")
- self.log.debug(message)
+ # self.log.debug(message)
return self.mailReceiver.inbound(message)
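The deletion logic added to both the POP and IMAP protocols above reduces to one rule: delete everything when the server owns the mailbox, otherwise delete only messages that were actually injected. As a hypothetical helper (not in the commit, which inlines the branching in each protocol):

    from txdav.caldav.datastore.scheduling.imip.inbound import MailReceiver

    def shouldDelete(deleteAllMail, actionTaken):
        # deleteAllMail comes from shouldDeleteAllMail(); INJECTION_SUBMITTED
        # means the message was handed off as a work item.
        return deleteAllMail or actionTaken == MailReceiver.INJECTION_SUBMITTED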
Modified: CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/txdav/caldav/datastore/scheduling/imip/test/test_inbound.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -15,7 +15,7 @@
##
-from twisted.internet.defer import inlineCallbacks
+from twisted.internet.defer import inlineCallbacks, succeed
from twisted.python.modules import getModule
from twistedcaldav.config import ConfigDict
@@ -25,6 +25,8 @@
from txdav.caldav.datastore.scheduling.imip.inbound import MailReceiver
from txdav.caldav.datastore.scheduling.imip.inbound import MailRetriever
from txdav.caldav.datastore.scheduling.imip.inbound import injectMessage
+from txdav.caldav.datastore.scheduling.imip.inbound import shouldDeleteAllMail
+from txdav.caldav.datastore.scheduling.imip.inbound import IMAP4DownloadProtocol
from txdav.caldav.datastore.scheduling.itip import iTIPRequestStatus
from txdav.caldav.datastore.test.util import buildCalendarStore
@@ -47,6 +49,7 @@
"UseSSL" : False,
"Server" : "example.com",
"Port" : 123,
+ "Username" : "xyzzy",
})
)
@@ -359,3 +362,87 @@
))
yield txn.commit()
yield wp.whenExecuted()
+
+
+ def test_shouldDeleteAllMail(self):
+
+ # Delete if the mail server is on the same host and using our
+ # dedicated account:
+ self.assertTrue(shouldDeleteAllMail("calendar.example.com",
+ "calendar.example.com", "com.apple.calendarserver"))
+ self.assertTrue(shouldDeleteAllMail("calendar.example.com",
+ "localhost", "com.apple.calendarserver"))
+
+ # Don't delete all otherwise:
+ self.assertFalse(shouldDeleteAllMail("calendar.example.com",
+ "calendar.example.com", "not_ours"))
+ self.assertFalse(shouldDeleteAllMail("calendar.example.com",
+ "localhost", "not_ours"))
+ self.assertFalse(shouldDeleteAllMail("calendar.example.com",
+ "mail.example.com", "com.apple.calendarserver"))
+
+
+ @inlineCallbacks
+ def test_deletion(self):
+ """
+ Verify the IMAP protocol will delete messages only when the right
+ conditions are met. Either:
+
+ A) We've been told to delete all mail
+ B) We've not been told to delete all mail, but it was a message
+ we processed
+ """
+
+ def stubFetchNextMessage():
+ pass
+
+ def stubCbFlagDeleted(result):
+ self.flagDeletedResult = result
+ return succeed(None)
+
+ proto = IMAP4DownloadProtocol()
+ self.patch(proto, "fetchNextMessage", stubFetchNextMessage)
+ self.patch(proto, "cbFlagDeleted", stubCbFlagDeleted)
+ results = {
+ "ignored" : (
+ {
+ "RFC822" : "a message"
+ }
+ )
+ }
+
+ # Delete all mail = False; action taken = submitted; result = deletion
+ proto.factory = StubFactory(MailReceiver.INJECTION_SUBMITTED, False)
+ self.flagDeletedResult = None
+ yield proto.cbGotMessage(results, "xyzzy")
+ self.assertEquals(self.flagDeletedResult, "xyzzy")
+
+ # Delete all mail = False; action taken = not submitted; result = no deletion
+ proto.factory = StubFactory(MailReceiver.NO_TOKEN, False)
+ self.flagDeletedResult = None
+ yield proto.cbGotMessage(results, "xyzzy")
+ self.assertEquals(self.flagDeletedResult, None)
+
+ # Delete all mail = True; action taken = submitted; result = deletion
+ proto.factory = StubFactory(MailReceiver.INJECTION_SUBMITTED, True)
+ self.flagDeletedResult = None
+ yield proto.cbGotMessage(results, "xyzzy")
+ self.assertEquals(self.flagDeletedResult, "xyzzy")
+
+ # Delete all mail = True; action taken = not submitted; result = deletion
+ proto.factory = StubFactory(MailReceiver.NO_TOKEN, True)
+ self.flagDeletedResult = None
+ yield proto.cbGotMessage(results, "xyzzy")
+ self.assertEquals(self.flagDeletedResult, "xyzzy")
+
+
+class StubFactory(object):
+
+ def __init__(self, actionTaken, deleteAllMail):
+ self.actionTaken = actionTaken
+ self.deleteAllMail = deleteAllMail
+
+ def handleMessage(self, messageData):
+ return succeed(self.actionTaken)
+
+
Modified: CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/sql.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/txdav/carddav/datastore/sql.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -337,6 +337,8 @@
AddressBookHome._register(EADDRESSBOOKTYPE)
+
+
class AddressBookSharingMixIn(SharingMixIn):
"""
Sharing code shared between AddressBook and AddressBookObject
@@ -359,7 +361,7 @@
@inlineCallbacks
def _isSharedOrInvited(self):
"""
- return a bool if this L{AddressBook} is shared or invited
+ return True if this L{AddressBook} is shared or invited
"""
sharedRows = []
if self.owned():
@@ -1103,7 +1105,7 @@
@inlineCallbacks
- def updateShare(self, shareeView, mode=None, status=None, message=None, name=None):
+ def updateShare(self, shareeView, mode=None, status=None, message=None):
"""
Update share mode, status, and message for a home child shared with
this (owned) L{CommonHomeChild}.
@@ -1124,9 +1126,6 @@
will be used as the default display name, or None to not update
@type message: L{str}
- @param name: The bind resource name or None to not update
- @type message: L{str}
-
@return: the name of the shared item in the sharee's home.
@rtype: a L{Deferred} which fires with a L{str}
"""
@@ -1138,8 +1137,7 @@
columnMap = dict([(k, v if v != "" else None)
for k, v in {bind.BIND_MODE:mode,
bind.BIND_STATUS:status,
- bind.MESSAGE:message,
- bind.RESOURCE_NAME:name}.iteritems() if v is not None])
+ bind.MESSAGE:message}.iteritems() if v is not None])
if len(columnMap):
@@ -1481,11 +1479,6 @@
self._initFromRow(tuple(rows[0]))
if self._kind == _ABO_KIND_GROUP:
- # generate "X-ADDRESSBOOKSERVER-MEMBER" properties
- # calc md5 and set size
- componentText = str((yield self.component()))
- self._md5 = hashlib.md5(componentText).hexdigest()
- self._size = len(componentText)
groupBindRows = yield AddressBookObject._bindForResourceIDAndHomeID.on(
self._txn, resourceID=self._resourceID, homeID=self._home._resourceID
@@ -1791,6 +1784,7 @@
uid = component.resourceUID()
assert inserting or self._uid == uid # can't change UID. Should be checked in upper layers
self._uid = uid
+ originalComponentText = str(component)
if self._kind == _ABO_KIND_GROUP:
memberAddresses = set(component.resourceMemberAddresses())
@@ -1828,33 +1822,27 @@
# missing uids and other cuaddrs e.g. user at example.com, are stored in same schema table
foreignMemberAddrs.extend(["urn:uuid:" + missingUID for missingUID in missingUIDs])
- # don't store group members in object text
- orginialComponentText = str(component)
+ # sort unique members
component.removeProperties("X-ADDRESSBOOKSERVER-MEMBER")
for memberAddress in sorted(list(memberAddresses)): # sort unique
component.addProperty(Property("X-ADDRESSBOOKSERVER-MEMBER", memberAddress))
-
- # use sorted for md5
componentText = str(component)
- self._md5 = hashlib.md5(componentText).hexdigest()
- self._componentChanged = orginialComponentText != componentText
- # remove members from component get new text
- self._component = deepcopy(component)
- component.removeProperties("X-ADDRESSBOOKSERVER-MEMBER")
- componentText = str(component)
- self._objectText = componentText
-
- #size for quota does not include group members
- self._size = len(componentText)
-
+ # remove unneeded fields to get stored _objectText
+ thinComponent = deepcopy(component)
+ thinComponent.removeProperties("X-ADDRESSBOOKSERVER-MEMBER")
+ thinComponent.removeProperties("X-ADDRESSBOOKSERVER-KIND")
+ thinComponent.removeProperties("UID")
+ self._objectText = str(thinComponent)
else:
- self._component = component
componentText = str(component)
- self._md5 = hashlib.md5(componentText).hexdigest()
- self._size = len(componentText)
self._objectText = componentText
+ self._size = len(self._objectText)
+ self._component = component
+ self._md5 = hashlib.md5(componentText).hexdigest()
+ self._componentChanged = originalComponentText != componentText
+
# Special - if migrating we need to preserve the original md5
if self._txn._migrating and hasattr(component, "md5"):
self._md5 = component.md5
@@ -2031,6 +2019,8 @@
# now add the properties to the component
for memberAddress in sorted(memberAddresses + foreignMembers):
component.addProperty(Property("X-ADDRESSBOOKSERVER-MEMBER", memberAddress))
+ component.addProperty(Property("X-ADDRESSBOOKSERVER-KIND", "group"))
+ component.addProperty(Property("UID", self._uid))
self._component = component
@@ -2284,7 +2274,7 @@
else:
if status == _BIND_STATUS_ACCEPTED:
shareeView = yield shareeHome.objectWithShareUID(bindName)
- yield shareeView._initSyncToken()
+ yield shareeView.addressbook()._initSyncToken()
yield shareeView._initBindRevision()
queryCacher = self._txn._queryCacher
@@ -2299,16 +2289,9 @@
@inlineCallbacks
- def _initSyncToken(self):
- yield self.addressbook()._initSyncToken()
-
-
- @inlineCallbacks
def _initBindRevision(self):
yield self.addressbook()._initBindRevision()
- # almost works
- # yield super(AddressBookObject, self)._initBindRevision()
bind = self._bindSchema
yield self._updateBindColumnsQuery(
{bind.BIND_REVISION : Parameter("revision"), }).on(
@@ -2321,8 +2304,7 @@
@inlineCallbacks
- # TODO: This is almost the same as AddressBook.updateShare(): combine
- def updateShare(self, shareeView, mode=None, status=None, message=None, name=None):
+ def updateShare(self, shareeView, mode=None, status=None, message=None):
"""
Update share mode, status, and message for a home child shared with
this (owned) L{CommonHomeChild}.
@@ -2343,9 +2325,6 @@
will be used as the default display name, or None to not update
@type message: L{str}
- @param name: The bind resource name or None to not update
- @type message: L{str}
-
@return: the name of the shared item in the sharee's home.
@rtype: a L{Deferred} which fires with a L{str}
"""
@@ -2357,8 +2336,7 @@
columnMap = dict([(k, v if v != "" else None)
for k, v in {bind.BIND_MODE:mode,
bind.BIND_STATUS:status,
- bind.MESSAGE:message,
- bind.RESOURCE_NAME:name}.iteritems() if v is not None])
+ bind.MESSAGE:message}.iteritems() if v is not None])
if len(columnMap):
@@ -2384,7 +2362,7 @@
shareeView._bindStatus = columnMap[bind.BIND_STATUS]
if shareeView._bindStatus == _BIND_STATUS_ACCEPTED:
if 0 == previouslyAcceptedBindCount:
- yield shareeView._initSyncToken()
+ yield shareeView.addressbook()._initSyncToken()
yield shareeView._initBindRevision()
shareeView.viewerHome()._children[self.addressbook().shareeName()] = shareeView.addressbook()
shareeView.viewerHome()._children[shareeView._resourceID] = shareeView.addressbook()
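A note on the group-vCard change above: the stored text is now "thin" (the X-ADDRESSBOOKSERVER-MEMBER and X-ADDRESSBOOKSERVER-KIND properties and the UID are stripped before writing), and the later hunk re-adds them when the component is rebuilt from the database. A toy round-trip over (name, value) pairs, purely illustrative of the scheme rather than the real Component API:

    STRIPPED = ("X-ADDRESSBOOKSERVER-MEMBER", "X-ADDRESSBOOKSERVER-KIND", "UID")

    def thin(props):
        # What gets stored as the object text: everything else.
        return [(name, value) for name, value in props if name not in STRIPPED]

    def fatten(props, uid, memberAddresses):
        # What component() reconstitutes from the member rows and the UID.
        return (props
                + [("X-ADDRESSBOOKSERVER-MEMBER", m) for m in sorted(memberAddresses)]
                + [("X-ADDRESSBOOKSERVER-KIND", "group"),
                   ("UID", uid)])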
Modified: CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/txdav/common/datastore/sql.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -2931,7 +2931,7 @@
@inlineCallbacks
- def updateShareFromSharingInvitation(self, invitation, mode=None, status=None, message=None, name=None):
+ def updateShareFromSharingInvitation(self, invitation, mode=None, status=None, message=None):
"""
Like L{updateShare} except that the original invitation is provided. That is used
to find the actual sharee L{CommonHomeChild} which is then passed to L{updateShare}.
@@ -2944,12 +2944,12 @@
if shareeView is None:
shareeView = yield shareeHome.invitedObjectWithShareUID(invitation.uid())
- result = yield self.updateShare(shareeView, mode, status, message, name)
+ result = yield self.updateShare(shareeView, mode, status, message)
returnValue(result)
@inlineCallbacks
- def updateShare(self, shareeView, mode=None, status=None, message=None, name=None):
+ def updateShare(self, shareeView, mode=None, status=None, message=None):
"""
Update share mode, status, and message for a home child shared with
this (owned) L{CommonHomeChild}.
@@ -2970,9 +2970,6 @@
will be used as the default display name, or None to not update
@type message: L{str}
- @param name: The bind resource name or None to not update
- @type message: L{str}
-
@return: the name of the shared item in the sharee's home.
@rtype: a L{Deferred} which fires with a L{str}
"""
@@ -2984,8 +2981,7 @@
columnMap = dict([(k, v if v != "" else None)
for k, v in {bind.BIND_MODE:mode,
bind.BIND_STATUS:status,
- bind.MESSAGE:message,
- bind.RESOURCE_NAME:name}.iteritems() if v is not None])
+ bind.MESSAGE:message}.iteritems() if v is not None])
if len(columnMap):
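The columnMap construction repeated in these updateShare() variants follows one idiom: a value of None means "leave that column alone", while an empty string explicitly clears the column by storing NULL. Isolated for clarity (buildColumnMap is an illustrative name, not from the code):

    def buildColumnMap(**candidates):
        return dict(
            (k, v if v != "" else None)
            for k, v in candidates.items()
            if v is not None
        )

    assert buildColumnMap(mode="read", status=None, message="") == {
        "mode": "read",   # supplied: update
        "message": None,  # empty string: clear to NULL
    }                     # status omitted entirely: not updated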
Modified: CalendarServer/branches/users/cdaboo/json/txdav/xml/base.py
===================================================================
--- CalendarServer/branches/users/cdaboo/json/txdav/xml/base.py 2013-08-30 20:39:27 UTC (rev 11649)
+++ CalendarServer/branches/users/cdaboo/json/txdav/xml/base.py 2013-08-31 21:14:10 UTC (rev 11650)
@@ -693,7 +693,7 @@
return date.strftime("%a, %d %b %Y %H:%M:%S GMT")
if type(date) is int:
- date = format(datetime.datetime.fromtimestamp(date))
+ date = format(datetime.datetime.utcfromtimestamp(date))
elif type(date) is str:
pass
elif type(date) is unicode:
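The one-word txdav/xml/base.py fix is a classic timezone bug: datetime.fromtimestamp() applies the server's local zone, so the resulting string, though labelled GMT, is off by the UTC offset on any machine not running in UTC. utcfromtimestamp() keeps the round trip exact:

    import calendar
    import datetime

    ts = 1377983650  # this revision's timestamp: 2013-08-31 21:14:10 UTC
    utc = datetime.datetime.utcfromtimestamp(ts)
    assert calendar.timegm(utc.timetuple()) == ts  # round-trips exactly
    print(utc.strftime("%a, %d %b %Y %H:%M:%S GMT"))
    # Sat, 31 Aug 2013 21:14:10 GMT -- fromtimestamp() on a US/Pacific host
    # would have produced 14:14:10 under the same GMT label.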