Diff: trunk (rev 26 => 27)
Modified: trunk/src/benchmark.c (26 => 27)
--- trunk/src/benchmark.c 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/benchmark.c 2009-09-14 18:46:18 UTC (rev 27)
@@ -22,7 +22,9 @@
struct __dispatch_benchmark_data_s {
+#ifdef HAVE_MACH_ABSOLUTE_TIME
mach_timebase_info_data_t tbi;
+#endif
uint64_t loop_cost;
void (*func)(void *);
void *ctxt;
@@ -38,28 +40,32 @@
register void (*f)(void *) = bdata->func;
register void *c = bdata->ctxt;
register size_t cnt = bdata->count;
+ size_t i = 0;
uint64_t start, delta;
#ifdef __LP64__
__uint128_t lcost;
#else
long double lcost;
#endif
+#ifdef HAVE_MACH_ABSOLUTE_TIME
kern_return_t kr;
- size_t i = 0;
kr = mach_timebase_info(&bdata->tbi);
dispatch_assert_zero(kr);
+#endif
- start = mach_absolute_time();
+ start = _dispatch_absolute_time();
do {
i++;
f(c);
} while (i < cnt);
- delta = mach_absolute_time() - start;
+ delta = _dispatch_absolute_time() - start;
lcost = delta;
+#ifdef HAVE_MACH_ABSOLUTE_TIME
lcost *= bdata->tbi.numer;
lcost /= bdata->tbi.denom;
+#endif
lcost /= cnt;
bdata->loop_cost = lcost;
@@ -96,16 +102,20 @@
return 0;
}
- start = mach_absolute_time();
+ start = _dispatch_absolute_time();
do {
i++;
func(ctxt);
} while (i < count);
- delta = mach_absolute_time() - start;
+ delta = _dispatch_absolute_time() - start;
conversion = delta;
+#ifdef HAVE_MACH_ABSOLUTE_TIME
conversion *= bdata.tbi.numer;
big_denom = bdata.tbi.denom;
+#else
+ big_denom = delta;
+#endif
big_denom *= count;
conversion /= big_denom;
ns = conversion;
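
The benchmark.c hunks above make the mach_timebase_info() scaling conditional on HAVE_MACH_ABSOLUTE_TIME: on platforms without Mach, _dispatch_absolute_time() (added in os_shims.h below) already returns nanoseconds, so no numer/denom conversion is needed. For reference, a minimal sketch of the Mach-only conversion — the helper name is illustrative and not part of this change:

    #include <mach/mach_time.h>
    #include <stdint.h>

    /* Convert mach_absolute_time() ticks to nanoseconds; the real code
     * widens to __uint128_t or long double first to limit overflow. */
    static uint64_t
    ticks_to_nanoseconds(uint64_t ticks)
    {
        mach_timebase_info_data_t tbi;

        (void)mach_timebase_info(&tbi);
        return ticks * tbi.numer / tbi.denom;
    }
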
Modified: trunk/src/dispatch.h (26 => 27)
--- trunk/src/dispatch.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/dispatch.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -21,7 +21,9 @@
#ifndef __DISPATCH_PUBLIC__
#define __DISPATCH_PUBLIC__
+#ifdef __APPLE__
#include <Availability.h>
+#endif
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
Modified: trunk/src/internal.h (26 => 27)
--- trunk/src/internal.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/internal.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -27,8 +27,21 @@
#ifndef __DISPATCH_INTERNAL__
#define __DISPATCH_INTERNAL__
+#include <config/config.h>
+
#define __DISPATCH_BUILDING_DISPATCH__
#define __DISPATCH_INDIRECT__
+
+#ifndef HAVE_MALLOC_CREATE_ZONE
+#include <compat/malloc_zone.h>
+#endif
+
+#ifdef HAVE_AVAILABILITY_H
+#include <Availability.h>
+#else
+#include <compat/Availability.h>
+#endif
+
#include "dispatch.h"
#include "base.h"
#include "time.h"
@@ -54,8 +67,13 @@
#endif
+#ifdef HAVE_LIBKERN_OSCROSSENDIAN_H
#include <libkern/OSCrossEndian.h>
+#endif
+#ifdef HAVE_LIBKERN_OSATOMIC_H
#include <libkern/OSAtomic.h>
+#endif
+#ifdef HAVE_MACH
#include <mach/boolean.h>
#include <mach/clock_types.h>
#include <mach/clock.h>
@@ -70,7 +88,10 @@
#include <mach/mig_errors.h>
#include <mach/host_info.h>
#include <mach/notify.h>
+#endif /* HAVE_MACH */
+#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
+#endif
#include <sys/event.h>
#include <sys/mount.h>
#include <sys/queue.h>
@@ -89,6 +110,9 @@
#include <errno.h>
#include <fcntl.h>
#include <search.h>
+#if !defined(HAVE_MACH) && defined(HAVE_SEM_INIT)
+#include <semaphore.h>
+#endif
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
@@ -261,6 +285,9 @@
#include "semaphore_internal.h"
#include "source_internal.h"
+#ifdef USE_APPLE_CRASHREPORTER_INFO
+
+#ifdef HAVE_MACH
// MIG_REPLY_MISMATCH means either:
// 1) A signal handler is NOT using async-safe API. See the sigaction(2) man page for more info.
// 2) A hand crafted call to mach_msg*() screwed up. Use MIG.
@@ -270,6 +297,7 @@
_dispatch_hardware_crash(); \
} \
} while (0)
+#endif
#if defined(__x86_64__) || defined(__i386__)
// total hack to ensure that return register of a function is not trashed
@@ -283,7 +311,7 @@
_dispatch_hardware_crash(); \
} while (0)
-#else
+#else /* !(defined(__x86_64__) || defined(__i386__)) */
#define DISPATCH_CRASH(x) do { \
__crashreporter_info__ = "BUG IN LIBDISPATCH: " x; \
@@ -294,8 +322,21 @@
__crashreporter_info__ = "BUG IN CLIENT OF LIBDISPATCH: " x; \
_dispatch_hardware_crash(); \
} while (0)
+#endif /* defined(__x86_64__) || defined(__i386__) */
+#else /* !USE_APPLE_CRASHREPORTER_INFO */
+
+#ifdef HAVE_MACH
+#define DISPATCH_VERIFY_MIG(x) do { \
+ if ((x) == MIG_REPLY_MISMATCH) { \
+ _dispatch_hardware_crash(); \
+ } \
+ } while (0)
#endif
+#define DISPATCH_CRASH(x) _dispatch_hardware_crash()
+#define DISPATCH_CLIENT_CRASH(x) _dispatch_hardware_crash()
+#endif /* USE_APPLE_CRASHREPORTER_INFO */
+
#endif /* __DISPATCH_INTERNAL__ */
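
internal.h now pulls in config/config.h and keys each platform-specific include off a HAVE_* feature macro. A hypothetical excerpt of the autoconf-generated config.h for a Mac OS X build is sketched below; the actual macro set is produced by the configure script and may differ:

    /* Hypothetical config/config.h excerpt (Mac OS X build). */
    #define HAVE_MACH 1
    #define HAVE_MACH_ABSOLUTE_TIME 1
    #define HAVE_AVAILABILITY_H 1
    #define HAVE_MALLOC_MALLOC_H 1
    #define HAVE_MALLOC_CREATE_ZONE 1
    #define HAVE_LIBKERN_OSATOMIC_H 1
    #define HAVE_LIBKERN_OSCROSSENDIAN_H 1
    #define HAVE_PTHREAD_KEY_INIT_NP 1
    #define HAVE_PTHREAD_WORKQUEUES 1
    #define USE_APPLE_CRASHREPORTER_INFO 1
    /* #undef HAVE_PTHREAD_NP_H */
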
Modified: trunk/src/legacy.h (26 => 27)
--- trunk/src/legacy.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/legacy.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -37,7 +37,9 @@
#include <dispatch/base.h> // for HeaderDoc
#endif
+#ifdef HAVE_MACH
#include <mach/mach_types.h>
+#endif
#define DISPATCH_DEPRECATED __attribute__((deprecated))
#define DISPATCH_PUBLIC_API __attribute__((visibility("default")))
@@ -49,7 +51,9 @@
struct dispatch_item_s *volatile di_next;
dispatch_queue_t di_cback_q;
uint32_t di_flags;
+#ifdef HAVE_MACH
semaphore_t di_semaphore;
+#endif
void * di_work_func;
void * di_work_ctxt;
void * di_cback_func;
@@ -302,11 +306,13 @@
long
dispatch_source_get_error(dispatch_source_t source, long* error);
+#ifdef HAVE_MACH
// Use: dispatch_source_get_handle
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW
mach_port_t
dispatch_source_get_machport(dispatch_source_t source);
+#endif
// Use: dispatch_source_get_handle
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
@@ -414,6 +420,7 @@
dispatch_source_attr_t attr,
void *context);
+#ifdef HAVE_MACH
// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW
@@ -424,6 +431,7 @@
dispatch_source_attr_t attr,
dispatch_queue_t queue,
dispatch_mig_callback_t mig_callback);
+#endif
enum {
DISPATCH_TIMER_WALL_CLOCK = 0x4,
@@ -588,6 +596,7 @@
};
// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...)
+#ifdef HAVE_MACH
#ifdef __BLOCKS__
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
DISPATCH_MALLOC DISPATCH_NOTHROW
@@ -611,6 +620,7 @@
dispatch_queue_t queue,
void *h_context,
dispatch_source_handler_function_t handler);
+#endif /* HAVE_MACH */
enum {
DISPATCH_SOURCE_DATA_ADD = 1,
@@ -691,6 +701,7 @@
void *h_context,
dispatch_source_handler_function_t handler);
+#ifdef HAVE_MACH
/*
* Raw Mach message support from MIG source.
*
@@ -743,6 +754,8 @@
new_callback(mach_msg_header_t *msg, mach_msg_header_t *reply) \
{ return _dispatch_CFMachPortCallBack(msg, reply, existing_callback); }
+#endif /* HAVE_MACH */
+
__END_DECLS
#endif
Modified: trunk/src/os_shims.h (26 => 27)
--- trunk/src/os_shims.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/os_shims.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -28,19 +28,35 @@
#define __DISPATCH_OS_SHIMS__
#include <pthread.h>
+#ifdef HAVE_PTHREAD_MACHDEP_H
#include <pthread_machdep.h>
+#endif
+#ifdef HAVE_PTHREAD_WORKQUEUES
#include <pthread_workqueue.h>
+#endif
+#ifdef HAVE_PTHREAD_NP_H
+#include <pthread_np.h>
+#endif
+#ifdef USE_APPLE_CRASHREPORTER_INFO
__private_extern__ const char *__crashreporter_info__;
+#endif
+#ifdef HAVE_PTHREAD_KEY_INIT_NP
static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0;
static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1;
static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2;
static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY3;
//__PTK_LIBDISPATCH_KEY4
//__PTK_LIBDISPATCH_KEY5
+#else
+pthread_key_t dispatch_queue_key;
+pthread_key_t dispatch_sema4_key;
+pthread_key_t dispatch_cache_key;
+pthread_key_t dispatch_bcounter_key;
+#endif
-
+#ifdef USE_APPLE_TSD_OPTIMIZATIONS
#define SIMULATE_5491082 1
#ifndef _PTHREAD_TSD_OFFSET
#define _PTHREAD_TSD_OFFSET 0
@@ -80,18 +96,46 @@
#endif
}
+#else /* !USE_APPLE_TSD_OPTIMIZATIONS */
+
static inline void
+_dispatch_thread_setspecific(pthread_key_t k, void *v)
+{
+ int res;
+
+ res = pthread_setspecific(k, v);
+ dispatch_assert_zero(res);
+}
+
+static inline void *
+_dispatch_thread_getspecific(pthread_key_t k)
+{
+
+ return pthread_getspecific(k);
+}
+#endif /* USE_APPLE_TSD_OPTIMIZATIONS */
+
+#ifdef HAVE_PTHREAD_KEY_INIT_NP
+static inline void
_dispatch_thread_key_init_np(unsigned long k, void (*d)(void *))
{
dispatch_assert_zero(pthread_key_init_np((int)k, d));
}
+#else
+static inline void
+_dispatch_thread_key_create(pthread_key_t *key, void (*destructor)(void *))
+{
+ dispatch_assert_zero(pthread_key_create(key, destructor));
+}
+#endif
+
#define _dispatch_thread_self pthread_self
#if DISPATCH_PERF_MON
-#if defined(SIMULATE_5491082) && (defined(__i386__) || defined(__x86_64__))
+#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && (defined(__i386__) || defined(__x86_64__))
#ifdef __LP64__
#define _dispatch_workitem_inc() asm("incq %%gs:%0" : "+m" \
(*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc")
@@ -103,7 +147,7 @@
#define _dispatch_workitem_dec() asm("decl %%gs:%0" : "+m" \
(*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc")
#endif
-#else
+#else /* !USE_APPLE_TSD_OPTIMIZATIONS */
static inline void
_dispatch_workitem_inc(void)
{
@@ -116,7 +160,7 @@
unsigned long cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
_dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt);
}
-#endif
+#endif /* USE_APPLE_TSD_OPTIMIZATIONS */
// C99 doesn't define flsll() or ffsll()
#ifdef __LP64__
@@ -149,4 +193,21 @@
#define _dispatch_workitem_dec()
#endif // DISPATCH_PERF_MON
+static inline uint64_t
+_dispatch_absolute_time(void)
+{
+#ifndef HAVE_MACH_ABSOLUTE_TIME
+ struct timespec ts;
+ int ret;
+
+ ret = clock_gettime(CLOCK_UPTIME, &ts);
+ dispatch_assume_zero(ret);
+
+ /* XXXRW: Some kind of overflow detection needed? */
+ return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec);
+#else
+ return mach_absolute_time();
#endif
+}
+
+#endif
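
os_shims.h now wraps the Apple-specific TSD fast paths and grows a portable _dispatch_absolute_time(): Mach builds keep returning raw mach_absolute_time() ticks, while other platforms return nanoseconds from clock_gettime(CLOCK_UPTIME). A usage sketch (the helper is hypothetical, mirroring how the perf-monitoring code in queue.c and benchmark.c uses the shim):

    /* Time a single call; on Mach the delta is in timebase ticks,
     * elsewhere it is already in nanoseconds. */
    static uint64_t
    measure_one_call(void (*func)(void *), void *ctxt)
    {
        uint64_t start, delta;

        start = _dispatch_absolute_time();
        func(ctxt);
        delta = _dispatch_absolute_time() - start;
        return delta;
    }
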
Modified: trunk/src/private.h (26 => 27)
--- trunk/src/private.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/private.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -27,9 +27,11 @@
#ifndef __DISPATCH_PRIVATE__
#define __DISPATCH_PRIVATE__
+#ifdef HAVE_MACH
#include <mach/boolean.h>
#include <mach/mach.h>
#include <mach/message.h>
+#endif
#include <unistd.h>
#include <sys/cdefs.h>
#include <sys/event.h>
@@ -66,8 +68,13 @@
DISPATCH_NOTHROW
void
+#ifdef USE_LIBDISPATCH_INIT_CONSTRUCTOR
+libdispatch_init(void) __attribute__ ((constructor));
+#else
libdispatch_init(void);
+#endif
+#ifdef HAVE_MACH
#define DISPATCH_COCOA_COMPAT 1
#if DISPATCH_COCOA_COMPAT
@@ -94,6 +101,7 @@
extern void (*_dispatch_end_NSAutoReleasePool)(void *);
#endif
+#endif /* HAVE_MACH */
/* pthreads magic */
@@ -102,12 +110,14 @@
DISPATCH_NOTHROW void dispatch_atfork_child(void);
DISPATCH_NOTHROW void dispatch_init_pthread(pthread_t);
+#ifdef HAVE_MACH
/*
* Extract the context pointer from a mach message trailer.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
void *
dispatch_mach_msg_get_context(mach_msg_header_t *msg);
+#endif
__END_DECLS
Modified: trunk/src/queue.c (26 => 27)
--- trunk/src/queue.c 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/queue.c 2009-09-14 18:46:18 UTC (rev 27)
@@ -19,7 +19,9 @@
*/
#include "internal.h"
+#ifdef HAVE_MACH
#include "protocol.h"
+#endif
void
dummy_function(void)
@@ -190,7 +192,9 @@
#define MAX_THREAD_COUNT 255
struct dispatch_root_queue_context_s {
+#ifdef HAVE_PTHREAD_WORKQUEUES
pthread_workqueue_t dgq_kworkqueue;
+#endif
uint32_t dgq_pending;
uint32_t dgq_thread_pool_size;
dispatch_semaphore_t dgq_thread_mediator;
@@ -449,21 +453,35 @@
_dispatch_queue_set_width_init(void)
{
size_t valsz = sizeof(uint32_t);
+ int ret;
- errno = 0;
- sysctlbyname("hw.activecpu", &_dispatch_hw_config.cc_max_active, &valsz, NULL, 0);
- dispatch_assume_zero(errno);
+#ifdef __APPLE__
+ ret = sysctlbyname("hw.activecpu", &_dispatch_hw_config.cc_max_active,
+ &valsz, NULL, 0);
+ dispatch_assume_zero(ret);
dispatch_assume(valsz == sizeof(uint32_t));
- errno = 0;
- sysctlbyname("hw.logicalcpu_max", &_dispatch_hw_config.cc_max_logical, &valsz, NULL, 0);
- dispatch_assume_zero(errno);
+ ret = sysctlbyname("hw.logicalcpu_max",
+ &_dispatch_hw_config.cc_max_logical, &valsz, NULL, 0);
+ dispatch_assume_zero(ret);
dispatch_assume(valsz == sizeof(uint32_t));
- errno = 0;
- sysctlbyname("hw.physicalcpu_max", &_dispatch_hw_config.cc_max_physical, &valsz, NULL, 0);
- dispatch_assume_zero(errno);
+ ret = sysctlbyname("hw.physicalcpu_max",
+ &_dispatch_hw_config.cc_max_physical, &valsz, NULL, 0);
+ dispatch_assume_zero(ret);
dispatch_assume(valsz == sizeof(uint32_t));
+#elif defined(__FreeBSD__)
+ ret = sysctlbyname("kern.smp.cpus", &_dispatch_hw_config.cc_max_active,
+ &valsz, NULL, 0);
+ dispatch_assume_zero(ret);
+ dispatch_assume(valsz == sizeof(uint32_t));
+
+ _dispatch_hw_config.cc_max_logical =
+ _dispatch_hw_config.cc_max_physical =
+ _dispatch_hw_config.cc_max_active;
+#else
+#error "_dispatch_queue_set_width_init: no supported way to query CPU count"
+#endif
}
void
@@ -966,12 +984,24 @@
dispatch_assert(countof(_dispatch_thread_mediator) == DISPATCH_ROOT_QUEUE_COUNT);
dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT);
+#ifdef HAVE_PTHREAD_KEY_INIT_NP
_dispatch_thread_key_init_np(dispatch_queue_key, _dispatch_queue_cleanup);
_dispatch_thread_key_init_np(dispatch_sema4_key, (void (*)(void *))dispatch_release); // use the extern release
_dispatch_thread_key_init_np(dispatch_cache_key, _dispatch_cache_cleanup2);
#if DISPATCH_PERF_MON
_dispatch_thread_key_init_np(dispatch_bcounter_key, NULL);
#endif
+#else /* !HAVE_PTHREAD_KEY_INIT_NP */
+ _dispatch_thread_key_create(&dispatch_queue_key,
+ _dispatch_queue_cleanup);
+ _dispatch_thread_key_create(&dispatch_sema4_key,
+ (void (*)(void *))dispatch_release); // use the extern release
+ _dispatch_thread_key_create(&dispatch_cache_key,
+ _dispatch_cache_cleanup2);
+#ifdef DISPATCH_PERF_MON
+ _dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
+#endif
+#endif /* HAVE_PTHREAD_KEY_INIT_NP */
_dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q);
@@ -1040,6 +1070,7 @@
}
#endif
+#ifdef HAVE_PTHREAD_WORKQUEUES
static inline int
_dispatch_rootq2wq_pri(long idx)
{
@@ -1060,19 +1091,30 @@
return pri;
#endif
}
+#endif
static void
_dispatch_root_queues_init(void *context __attribute__((unused)))
{
+#ifdef HAVE_PTHREAD_WORKQUEUES
bool disable_wq = getenv("LIBDISPATCH_DISABLE_KWQ");
pthread_workqueue_attr_t pwq_attr;
+ int r;
+#endif
+#ifdef HAVE_MACH
kern_return_t kr;
- int i, r;
+#else
+ int ret;
+#endif
+ int i;
+#ifdef HAVE_PTHREAD_WORKQUEUES
r = pthread_workqueue_attr_init_np(&pwq_attr);
dispatch_assume_zero(r);
+#endif
for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
+#ifdef HAVE_PTHREAD_WORKQUEUES
r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, _dispatch_rootq2wq_pri(i));
dispatch_assume_zero(r);
r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1);
@@ -1089,18 +1131,29 @@
if (r != ENOTSUP) {
dispatch_assume_zero(r);
}
+#endif /* HAVE_PTHREAD_WORKQUEUES */
+#ifdef HAVE_MACH
// override the default FIFO behavior for the pool semaphores
kr = semaphore_create(mach_task_self(), &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0);
DISPATCH_VERIFY_MIG(kr);
dispatch_assume_zero(kr);
dispatch_assume(_dispatch_thread_mediator[i].dsema_port);
+#else
+ /* XXXRW: POSIX semaphores don't support LIFO? */
+ ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0);
+ dispatch_assume_zero(ret);
+#endif
+#ifdef HAVE_PTHREAD_WORKQUEUES
} else {
dispatch_assume(_dispatch_root_queue_contexts[i].dgq_kworkqueue);
}
+#endif
}
+#ifdef HAVE_PTHREAD_WORKQUEUES
r = pthread_workqueue_attr_destroy_np(&pwq_attr);
dispatch_assume_zero(r);
+#endif
}
bool
@@ -1108,8 +1161,10 @@
{
static dispatch_once_t pred;
struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
+#ifdef HAVE_PTHREAD_WORKQUEUES
pthread_workitem_handle_t wh;
unsigned int gen_cnt;
+#endif
pthread_t pthr;
int r, t_count;
@@ -1123,6 +1178,7 @@
dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);
+#ifdef HAVE_PTHREAD_WORKQUEUES
if (qc->dgq_kworkqueue) {
if (dispatch_atomic_cmpxchg(&qc->dgq_pending, 0, 1)) {
_dispatch_debug("requesting new worker thread");
@@ -1134,6 +1190,7 @@
}
goto out;
}
+#endif
if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
goto out;
@@ -1164,7 +1221,7 @@
_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq)
{
#if DISPATCH_PERF_MON
- uint64_t start = mach_absolute_time();
+ uint64_t start = _dispatch_absolute_time();
#endif
_dispatch_queue_drain(dq);
#if DISPATCH_PERF_MON
@@ -1390,7 +1447,7 @@
#endif
#if DISPATCH_PERF_MON
- uint64_t start = mach_absolute_time();
+ uint64_t start = _dispatch_absolute_time();
#endif
while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) {
_dispatch_continuation_pop(item);
@@ -1413,7 +1470,7 @@
void
_dispatch_queue_merge_stats(uint64_t start)
{
- uint64_t avg, delta = mach_absolute_time() - start;
+ uint64_t avg, delta = _dispatch_absolute_time() - start;
unsigned long count, bucket;
count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key);
@@ -1489,7 +1546,7 @@
static void
dispatch_queue_attr_dispose(dispatch_queue_attr_t attr)
{
- dispatch_queue_attr_set_finalizer(attr, NULL);
+ dispatch_queue_attr_set_finalizer_f(attr, NULL, NULL);
_dispatch_dispose(attr);
}
@@ -1622,13 +1679,20 @@
static char _dispatch_build[16];
+/*
+ * XXXRW: What to do here for !Mac OS X?
+ */
static void
_dispatch_bug_init(void *context __attribute__((unused)))
{
+#ifdef __APPLE__
int mib[] = { CTL_KERN, KERN_OSVERSION };
size_t bufsz = sizeof(_dispatch_build);
sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0);
+#else
+ bzero(_dispatch_build, sizeof(_dispatch_build));
+#endif
}
void
@@ -1641,7 +1705,7 @@
dispatch_once_f(&pred, NULL, _dispatch_bug_init);
if (last_seen != ra) {
last_seen = ra;
- _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, line, val);
+ _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, (unsigned long)line, val);
}
}
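
The CPU-count probe in queue.c now checks sysctlbyname()'s return value rather than errno, and gains a FreeBSD branch that reads kern.smp.cpus. A self-contained sketch of the same split, with the assertion-style error handling elided:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdint.h>

    /* Query the number of active CPUs; falls back to 1 if the sysctl
     * is unavailable.  Illustrative only. */
    static uint32_t
    active_cpu_count(void)
    {
        uint32_t count = 1;
        size_t len = sizeof(count);

    #if defined(__APPLE__)
        (void)sysctlbyname("hw.activecpu", &count, &len, NULL, 0);
    #elif defined(__FreeBSD__)
        (void)sysctlbyname("kern.smp.cpus", &count, &len, NULL, 0);
    #endif
        return count;
    }
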
Modified: trunk/src/queue_internal.h (26 => 27)
--- trunk/src/queue_internal.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/queue_internal.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -62,6 +62,7 @@
#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64
+#ifndef DISPATCH_NO_LEGACY
#define DISPATCH_QUEUE_HEADER \
uint32_t dq_running; \
uint32_t dq_width; \
@@ -70,6 +71,15 @@
unsigned long dq_serialnum; \
void *dq_finalizer_ctxt; \
dispatch_queue_finalizer_function_t dq_finalizer_func
+#else
+#define DISPATCH_QUEUE_HEADER \
+ uint32_t dq_running; \
+ uint32_t dq_width; \
+ struct dispatch_object_s *dq_items_tail; \
+ struct dispatch_object_s *volatile dq_items_head; \
+ unsigned long dq_serialnum; \
+ void *dq_finalizer_ctxt;
+#endif
struct dispatch_queue_s {
DISPATCH_STRUCT_HEADER(dispatch_queue_s, dispatch_queue_vtable_s);
Modified: trunk/src/semaphore.c (26 => 27)
--- trunk/src/semaphore.c 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/semaphore.c 2009-09-14 18:46:18 UTC (rev 27)
@@ -21,11 +21,19 @@
#include "internal.h"
// semaphores are too fundamental to use the dispatch_assume*() macros
+#ifdef HAVE_MACH
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
if (x) { \
DISPATCH_CRASH("flawed group/semaphore logic"); \
} \
} while (0)
+#else
+#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
+ if ((x) == -1) { \
+ DISPATCH_CRASH("flawed group/semaphore logic"); \
+ } \
+ } while (0)
+#endif
struct dispatch_semaphore_vtable_s {
DISPATCH_VTABLE_HEADER(dispatch_semaphore_s);
@@ -100,6 +108,7 @@
return dsema;
}
+#ifdef HAVE_MACH
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
@@ -129,13 +138,32 @@
_dispatch_safe_fork = false;
}
+#else /* !HAVE_MACH */
+static void
+_dispatch_posix_semaphore_create(sem_t *s4)
+{
+ int ret;
+ if (*s4) {
+ return;
+ }
+
+ ret = sem_init(s4, 0, 0);
+ dispatch_assume_zero(ret);
+}
+#endif /* HAVE_MACH */
+
DISPATCH_NOINLINE
static long
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
+#ifdef HAVE_MACH
mach_timespec_t _timeout;
kern_return_t kr;
+#else
+ struct timespec _timeout;
+ int ret;
+#endif
uint64_t nsec;
long orig;
@@ -149,7 +177,11 @@
}
}
+#ifdef HAVE_MACH
_dispatch_semaphore_create_port(&dsema->dsema_port);
+#else
+ _dispatch_posix_semaphore_create(&dsema->dsema_sem);
+#endif
// From xnu/osfmk/kern/sync_sema.c:
// wait_semaphore->count = -1; /* we don't keep an actual count */
@@ -161,6 +193,7 @@
switch (timeout) {
default:
+#ifdef HAVE_MACH
do {
// timeout() already calculates relative time left
nsec = _dispatch_timeout(timeout);
@@ -173,20 +206,48 @@
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
break;
}
+#else /* !HAVE_MACH */
+ do {
+ nsec = _dispatch_timeout(timeout);
+ _timeout.tv_sec = (typeof(_timeout.tv_sec))
+ (nsec / NSEC_PER_SEC);
+ _timeout.tv_nsec = (typeof(_timeout.tv_nsec))
+ (nsec % NSEC_PER_SEC);
+ ret = slowpath(sem_timedwait(&dsema->dsema_sem,
+ &_timeout));
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno != ETIMEDOUT) {
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+ break;
+ }
+#endif /* HAVE_MACH */
// Fall through and try to undo what the fast path did to dsema->dsema_value
case DISPATCH_TIME_NOW:
while ((orig = dsema->dsema_value) < 0) {
if (dispatch_atomic_cmpxchg(&dsema->dsema_value, orig, orig + 1)) {
+#ifdef HAVE_MACH
return KERN_OPERATION_TIMED_OUT;
+#else
+ errno = ETIMEDOUT;
+ return -1;
+#endif
}
}
// Another thread called semaphore_signal().
// Fall through and drain the wakeup.
case DISPATCH_TIME_FOREVER:
+#ifdef HAVE_MACH
do {
kr = semaphore_wait(dsema->dsema_port);
} while (kr == KERN_ABORTED);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+#else
+ do {
+ ret = sem_wait(&dsema->dsema_sem);
+ } while (ret != 0);
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+#endif
break;
}
@@ -198,7 +259,7 @@
dispatch_group_enter(dispatch_group_t dg)
{
dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
-#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
+#if defined(USE_APPLE_SEMAPHORE_OPTIMIZATIONS) && defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
// This assumes:
// 1) Way too much about the optimizer of GCC.
// 2) There will never be more than LONG_MAX threads.
@@ -226,7 +287,7 @@
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
-#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
+#if defined(USE_APPLE_SEMAPHORE_OPTIMIZATIONS) && defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
// This assumes:
// 1) Way too much about the optimizer of GCC.
// 2) There will never be more than LONG_MAX threads.
@@ -257,9 +318,13 @@
static long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
+#ifndef HAVE_MACH
+ int ret;
+#else
kern_return_t kr;
_dispatch_semaphore_create_port(&dsema->dsema_port);
+#endif
// Before dsema_sent_ksignals is incremented we can rely on the reference
// held by the waiter. However, once this value is incremented the waiter
@@ -270,8 +335,12 @@
dispatch_atomic_inc(&dsema->dsema_sent_ksignals);
+#ifdef HAVE_MACH
kr = semaphore_signal(dsema->dsema_port);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+#else
+ ret = sem_post(&dsema->dsema_sem);
+#endif
_dispatch_release(dsema);
@@ -294,7 +363,7 @@
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
-#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
+#if defined(USE_APPLE_SEMAPHORE_OPTIMIZATIONS) && defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
// overflow detection
// this assumes way too much about the optimizer of GCC
asm(
@@ -329,16 +398,27 @@
struct dispatch_sema_notify_s *tmp, *head = dispatch_atomic_xchg(&dsema->dsema_notify_head, NULL);
long rval = dispatch_atomic_xchg(&dsema->dsema_group_waiters, 0);
bool do_rel = head;
+#ifdef HAVE_MACH
long kr;
+#else
+ int ret;
+#endif
// wake any "group" waiter or notify blocks
if (rval) {
+#ifdef HAVE_MACH
_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
do {
kr = semaphore_signal(dsema->dsema_waiter_port);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
} while (--rval);
+#else
+ do {
+ ret = sem_post(&dsema->dsema_sem);
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+ } while (--rval);
+#endif
}
while (head) {
dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
@@ -359,8 +439,13 @@
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
+#ifdef HAVE_MACH
mach_timespec_t _timeout;
kern_return_t kr;
+#else
+ struct timespec _timeout;
+ int ret;
+#endif
uint64_t nsec;
long orig;
@@ -378,7 +463,9 @@
return _dispatch_group_wake(dsema);
}
+#ifdef HAVE_MACH
_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
+#endif
// From xnu/osfmk/kern/sync_sema.c:
// wait_semaphore->count = -1; /* we don't keep an actual count */
@@ -390,6 +477,7 @@
switch (timeout) {
default:
+#ifdef HAVE_MACH
do {
nsec = _dispatch_timeout(timeout);
_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
@@ -400,20 +488,47 @@
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
break;
}
+#else
+ do {
+ nsec = _dispatch_timeout(timeout);
+ _timeout.tv_sec = (typeof(_timeout.tv_sec))
+ (nsec / NSEC_PER_SEC);
+ _timeout.tv_nsec = (typeof(_timeout.tv_nsec))
+ (nsec % NSEC_PER_SEC);
+ ret = slowpath(sem_timedwait(&dsema->dsema_sem,
+ &_timeout));
+ } while (ret == -1 && errno == EINTR);
+ if (ret == 0 || errno != ETIMEDOUT) {
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+ break;
+ }
+#endif
// Fall through and try to undo the earlier change to dsema->dsema_group_waiters
case DISPATCH_TIME_NOW:
while ((orig = dsema->dsema_group_waiters)) {
if (dispatch_atomic_cmpxchg(&dsema->dsema_group_waiters, orig, orig - 1)) {
+#ifdef HAVE_MACH
return KERN_OPERATION_TIMED_OUT;
+#else
+ errno = ETIMEDOUT;
+ return -1;
+#endif
}
}
// Another thread called semaphore_signal().
// Fall through and drain the wakeup.
case DISPATCH_TIME_FOREVER:
+#ifdef HAVE_MACH
do {
kr = semaphore_wait(dsema->dsema_waiter_port);
} while (kr == KERN_ABORTED);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+#else
+ do {
+ ret = sem_wait(&dsema->dsema_sem);
+ } while (ret == -1 && errno == EINTR);
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+#endif
break;
}
@@ -429,7 +544,12 @@
return 0;
}
if (timeout == 0) {
+#ifdef HAVE_MACH
return KERN_OPERATION_TIMED_OUT;
+#else
+ errno = ETIMEDOUT;
+ return (-1);
+#endif
}
return _dispatch_group_wait_slow(dsema, timeout);
}
@@ -474,12 +594,17 @@
void
_dispatch_semaphore_dispose(dispatch_semaphore_t dsema)
{
+#ifdef HAVE_MACH
kern_return_t kr;
+#else
+ int ret;
+#endif
if (dsema->dsema_value < dsema->dsema_orig) {
DISPATCH_CLIENT_CRASH("Semaphore/group object deallocated while in use");
}
+#ifdef HAVE_MACH
if (dsema->dsema_port) {
kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
@@ -488,6 +613,10 @@
kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
}
+#else
+ ret = sem_destroy(&dsema->dsema_sem);
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+#endif
_dispatch_dispose(dsema);
}
@@ -498,8 +627,12 @@
size_t offset = 0;
offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema);
offset += dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
- offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, value = %ld, orig = %ld }",
- dsema->dsema_port, dsema->dsema_value, dsema->dsema_orig);
+#ifdef HAVE_MACH
+ offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
+ dsema->dsema_port);
+#endif
+ offset += snprintf(&buf[offset], bufsiz - offset,
+ "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
return offset;
}
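
semaphore.c now has a POSIX path beside the Mach one: sem_init()/sem_post()/sem_wait() stand in for semaphore_create()/semaphore_signal()/semaphore_wait(), with EINTR playing the role KERN_ABORTED plays on Mach. A minimal sketch of the retry pattern used by the DISPATCH_TIME_FOREVER case:

    #include <errno.h>
    #include <semaphore.h>

    /* Block on a POSIX semaphore, retrying if interrupted by a signal. */
    static void
    wait_forever(sem_t *sem)
    {
        int ret;

        do {
            ret = sem_wait(sem);
        } while (ret == -1 && errno == EINTR);
    }
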
Modified: trunk/src/semaphore_internal.h (26 => 27)
--- trunk/src/semaphore_internal.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/semaphore_internal.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -39,8 +39,14 @@
long dsema_value;
long dsema_orig;
size_t dsema_sent_ksignals;
+#ifdef HAVE_MACH
semaphore_t dsema_port;
semaphore_t dsema_waiter_port;
+#elif defined(HAVE_SEM_INIT)
+ sem_t dsema_sem;
+#else
+#error "No supported semaphore type"
+#endif
size_t dsema_group_waiters;
struct dispatch_sema_notify_s *dsema_notify_head;
struct dispatch_sema_notify_s *dsema_notify_tail;
Modified: trunk/src/shims.c (26 => 27)
--- trunk/src/shims.c 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/shims.c 2009-09-14 18:46:18 UTC (rev 27)
@@ -20,6 +20,7 @@
#include "internal.h"
+#ifdef HAVE_MACH
void *
dispatch_mach_msg_get_context(mach_msg_header_t *msg)
{
@@ -63,3 +64,4 @@
return TRUE;
}
+#endif /* HAVE_MACH */
Modified: trunk/src/source.c (26 => 27)
--- trunk/src/source.c 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/source.c 2009-09-14 18:46:18 UTC (rev 27)
@@ -19,8 +19,10 @@
*/
#include "internal.h"
+#ifdef HAVE_MACH
#include "protocol.h"
#include "protocolServer.h"
+#endif
#include <sys/mount.h>
#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1)
@@ -92,15 +94,19 @@
static void _dispatch_kevent_merge(dispatch_source_t ds);
static void _dispatch_kevent_release(dispatch_source_t ds);
static void _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags);
+#ifdef HAVE_MACH
static void _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags);
static void _dispatch_kevent_machport_enable(dispatch_kevent_t dk);
static void _dispatch_kevent_machport_disable(dispatch_kevent_t dk);
static void _dispatch_drain_mach_messages(struct kevent *ke);
+#endif
static void _dispatch_timer_list_update(dispatch_source_t ds);
+#ifdef HAVE_MACH
static void
_dispatch_mach_notify_source_init(void *context __attribute__((unused)));
+#endif
static const char *
_evfiltstr(short filt)
@@ -114,10 +120,14 @@
_evfilt2(EVFILT_PROC);
_evfilt2(EVFILT_SIGNAL);
_evfilt2(EVFILT_TIMER);
+#ifdef HAVE_MACH
_evfilt2(EVFILT_MACHPORT);
+#endif
_evfilt2(EVFILT_FS);
_evfilt2(EVFILT_USER);
+#if HAVE_DECL_EVFILT_SESSION
_evfilt2(EVFILT_SESSION);
+#endif
_evfilt2(DISPATCH_EVFILT_TIMER);
_evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
@@ -135,7 +145,11 @@
static dispatch_kevent_t
_dispatch_kevent_find(uintptr_t ident, short filter)
{
+#ifdef HAVE_MACH
uintptr_t hash = DSL_HASH(filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident);
+#else
+ uintptr_t hash = DSL_HASH(ident);
+#endif
dispatch_kevent_t dki;
TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) {
@@ -150,7 +164,11 @@
_dispatch_kevent_insert(dispatch_kevent_t dk)
{
uintptr_t ident = dk->dk_kevent.ident;
+#ifdef HAVE_MACH
uintptr_t hash = DSL_HASH(dk->dk_kevent.filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident);
+#else
+ uintptr_t hash = DSL_HASH(ident);
+#endif
TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list);
}
@@ -306,9 +324,11 @@
case DISPATCH_EVFILT_CUSTOM_OR:
// these types not registered with kevent
return;
+#ifdef HAVE_MACH
case EVFILT_MACHPORT:
_dispatch_kevent_machport_resume(dk, new_flags, del_flags);
break;
+#endif
case EVFILT_PROC:
if (dk->dk_kevent.flags & EV_ONESHOT) {
return;
@@ -413,6 +433,7 @@
_dispatch_queue_dispose((dispatch_queue_t)ds);
}
+#ifndef DISPATCH_NO_LEGACY
static void
_dispatch_kevent_debugger2(void *context, dispatch_source_t unused __attribute__((unused)))
{
@@ -457,8 +478,8 @@
}
TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) {
fprintf(debug_stream, "\t<br><li>DK %p ident %lu filter %s flags 0x%hx fflags 0x%x data 0x%lx udata %p\n",
- dk, dk->dk_kevent.ident, _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags,
- dk->dk_kevent.fflags, dk->dk_kevent.data, dk->dk_kevent.udata);
+ dk, (unsigned long)dk->dk_kevent.ident, _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags,
+ dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, dk->dk_kevent.udata);
fprintf(debug_stream, "\t\t<ul>\n");
TAILQ_FOREACH(ds, &dk->dk_sources, ds_list) {
fprintf(debug_stream, "\t\t\t<li>DS %p refcnt 0x%x suspend 0x%x data 0x%lx mask 0x%lx flags 0x%x</li>\n",
@@ -546,21 +567,28 @@
out_bad:
close(fd);
}
+#endif /* DISPATCH_NO_LEGACY */
void
_dispatch_source_drain_kevent(struct kevent *ke)
{
+#ifndef DISPATCH_NO_LEGACY
static dispatch_once_t pred;
+#endif
dispatch_kevent_t dk = ke->udata;
dispatch_source_t dsi;
+#ifndef DISPATCH_NO_LEGACY
dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger);
+#endif
dispatch_debug_kevents(ke, 1, __func__);
+#ifdef HAVE_MACH
if (ke->filter == EVFILT_MACHPORT) {
return _dispatch_drain_mach_messages(ke);
}
+#endif
dispatch_assert(dk);
if (ke->flags & EV_ONESHOT) {
@@ -583,9 +611,11 @@
case DISPATCH_EVFILT_CUSTOM_OR:
// these sources live on statically allocated lists
return;
+#ifdef HAVE_MACH
case EVFILT_MACHPORT:
_dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags);
break;
+#endif
case EVFILT_PROC:
if (dk->dk_kevent.flags & EV_ONESHOT) {
break; // implicitly deleted
@@ -599,11 +629,15 @@
break;
}
+#ifdef HAVE_MACH
if (dk->dk_kevent.filter == EVFILT_MACHPORT) {
key = MACH_PORT_INDEX(dk->dk_kevent.ident);
} else {
+#endif
key = dk->dk_kevent.ident;
+#ifdef HAVE_MACH
}
+#endif
TAILQ_REMOVE(&_dispatch_sources[DSL_HASH(key)], dk, dk_list);
free(dk);
@@ -923,7 +957,14 @@
.filter = EVFILT_PROC,
.flags = EV_CLEAR,
},
- .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_SIGNAL|NOTE_REAP,
+ .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC
+#if HAVE_DECL_NOTE_SIGNAL
+ |NOTE_SIGNAL
+#endif
+#if HAVE_DECL_NOTE_REAP
+ |NOTE_REAP
+#endif
+ ,
};
const struct dispatch_source_type_s _dispatch_source_type_signal = {
@@ -937,7 +978,12 @@
.filter = EVFILT_VNODE,
.flags = EV_CLEAR,
},
- .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|NOTE_RENAME|NOTE_REVOKE|NOTE_NONE,
+ .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|
+ NOTE_RENAME|NOTE_REVOKE
+#if HAVE_DECL_NOTE_NONE
+ |NOTE_NONE
+#endif
+ ,
};
const struct dispatch_source_type_s _dispatch_source_type_vfs = {
@@ -945,9 +991,18 @@
.filter = EVFILT_FS,
.flags = EV_CLEAR,
},
- .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK|VQ_UPDATE|VQ_VERYLOWDISK,
+ .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|
+ VQ_ASSIST|VQ_NOTRESPLOCK
+#if HAVE_DECL_VQ_UPDATE
+ |VQ_UPDATE
+#endif
+#if HAVE_DECL_VQ_VERYLOWDISK
+ |VQ_VERYLOWDISK
+#endif
+ ,
};
+#ifdef HAVE_MACH
const struct dispatch_source_type_s _dispatch_source_type_mach_send = {
.ke = {
.filter = EVFILT_MACHPORT,
@@ -964,6 +1019,7 @@
.fflags = DISPATCH_MACHPORT_RECV,
},
};
+#endif
const struct dispatch_source_type_s _dispatch_source_type_data_add = {
.ke = {
@@ -1044,9 +1100,11 @@
ds->ds_dkev = dk;
ds->ds_pending_data_mask = dk->dk_kevent.fflags;
if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) {
+#ifdef HAVE_MACH
if (proto_kev->filter != EVFILT_MACHPORT) {
ds->ds_is_level = true;
}
+#endif
ds->ds_needs_rearm = true;
} else if (!(EV_CLEAR & proto_kev->flags)) {
// we cheat and use EV_CLEAR to mean a "flag thingy"
@@ -1064,10 +1122,13 @@
#endif
// Some sources require special processing
+#ifdef HAVE_MACH
if (type == DISPATCH_SOURCE_TYPE_MACH_SEND) {
static dispatch_once_t pred;
dispatch_once_f(&pred, NULL, _dispatch_mach_notify_source_init);
- } else if (type == DISPATCH_SOURCE_TYPE_TIMER) {
+ } else
+#endif
+ if (type == DISPATCH_SOURCE_TYPE_TIMER) {
ds->ds_timer.flags = mask;
}
@@ -1320,7 +1381,7 @@
uint64_t now, missed;
if (timer == DISPATCH_TIMER_INDEX_MACH) {
- now = mach_absolute_time();
+ now = _dispatch_absolute_time();
} else {
now = _dispatch_get_nanoseconds();
}
@@ -1365,7 +1426,7 @@
}
}
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || !defined(HAVE_MACH_ABSOLUTE_TIME)
// these architectures always return mach_absolute_time() in nanoseconds
#define _dispatch_convert_mach2nano(x) (x)
#define _dispatch_convert_nano2mach(x) (x)
@@ -1452,7 +1513,7 @@
if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) {
now = _dispatch_get_nanoseconds();
} else {
- now = mach_absolute_time();
+ now = _dispatch_absolute_time();
}
if (ds->ds_timer.target <= now) {
howsoon->tv_sec = 0;
@@ -1521,7 +1582,7 @@
dispatch_suspend(ds);
if (start == DISPATCH_TIME_NOW) {
- start = mach_absolute_time();
+ start = _dispatch_absolute_time();
} else if (start == DISPATCH_TIME_FOREVER) {
start = INT64_MAX;
}
@@ -1542,7 +1603,7 @@
params->values.leeway = leeway;
params->values.flags |= DISPATCH_TIMER_WALL_CLOCK;
} else {
- // mach clock
+ // absolute clock
params->ident = DISPATCH_TIMER_INDEX_MACH;
params->values.start = start;
params->values.target = start;
@@ -1591,6 +1652,7 @@
}
#endif /* DISPATCH_NO_LEGACY */
+#ifdef HAVE_MACH
static dispatch_source_t _dispatch_mach_notify_source;
static mach_port_t _dispatch_port_set;
static mach_port_t _dispatch_event_port;
@@ -2010,3 +2072,4 @@
return kr;
}
+#endif /* HAVE_MACH */
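
source.c guards the optional kevent flags (NOTE_SIGNAL, NOTE_REAP, NOTE_NONE, VQ_UPDATE, VQ_VERYLOWDISK, EVFILT_SESSION) with HAVE_DECL_* macros. These follow the autoconf AC_CHECK_DECLS convention, which defines each macro to 1 or 0 rather than leaving it undefined — hence "#if" rather than "#ifdef" in the hunks above. A small illustrative pattern (the wrapper macro is hypothetical):

    /* Collapse an optionally-declared flag to 0 when it is absent. */
    #if HAVE_DECL_NOTE_REAP
    #define DISPATCH_NOTE_REAP NOTE_REAP
    #else
    #define DISPATCH_NOTE_REAP 0
    #endif
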
Modified: trunk/src/source.h (26 => 27)
--- trunk/src/source.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/source.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -26,8 +26,10 @@
#include <dispatch/base.h> // for HeaderDoc
#endif
+#ifdef HAVE_MACH
#include <mach/port.h>
#include <mach/message.h>
+#endif
#include <sys/signal.h>
/*!
Modified: trunk/src/source_private.h (26 => 27)
--- trunk/src/source_private.h 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/source_private.h 2009-09-14 18:46:18 UTC (rev 27)
@@ -111,6 +111,7 @@
__BEGIN_DECLS
+#ifdef HAVE_MACH
/*!
* @typedef dispatch_mig_callback_t
*
@@ -123,6 +124,7 @@
DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
mach_msg_return_t
dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback);
+#endif
__END_DECLS
Modified: trunk/src/time.c (26 => 27)
--- trunk/src/time.c 2009-09-14 18:31:33 UTC (rev 26)
+++ trunk/src/time.c 2009-09-14 18:46:18 UTC (rev 27)
@@ -128,7 +128,7 @@
// mach clock
delta = _dispatch_time_nano2mach(delta);
if (inval == 0) {
- inval = mach_absolute_time();
+ inval = _dispatch_absolute_time();
}
if (delta >= 0) {
if ((int64_t)(inval += delta) <= 0) {
@@ -178,6 +178,6 @@
now = _dispatch_get_nanoseconds();
return now >= when ? 0 : when - now;
}
- now = mach_absolute_time();
+ now = _dispatch_absolute_time();
return now >= when ? 0 : _dispatch_time_mach2nano(when - now);
}