[Xquartz-changes] xserver: Branch 'server-1.12-apple' - 2 commits

Jeremy Huddleston jeremyhu at freedesktop.org
Sun Oct 16 22:45:08 PDT 2011


Rebased ref, commits from common ancestor:
commit 9be76e4fe3cd507ebab45a9b67494f86d3561e56
Author: Jeremy Huddleston <jeremyhu at apple.com>
Date:   Sun Oct 16 21:09:15 2011 -0700

    mieq: Reserve some space in EQ for release and other special events
    
    The last 64 events in the event queue will be reserved for release
    events in order to help return the system to a cleaner state when
    it comes back from a soft wedge.
    
    Signed-off-by: Jeremy Huddleston <jeremyhu at apple.com>
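
To make the reservation concrete, here is a minimal standalone sketch of the
drop test that mieqEnqueue applies below (would_drop is a hypothetical helper
written for illustration, not part of the patch):

    #include <stdbool.h>
    #include <stddef.h>

    #define QUEUE_RESERVED_SIZE 64

    /* Illustrative sketch, not from the patch: mirrors the new condition in
     * mieqEnqueue.  A completely full queue drops everything; once only the
     * reserved tail is left, ordinary events are dropped while release-type
     * events may still be queued. */
    static bool
    would_drop(size_t n_enqueued, size_t nevents, bool reserved_candidate)
    {
        if (n_enqueued + 1 == nevents)
            return true;
        if (n_enqueued + 1 >= nevents - QUEUE_RESERVED_SIZE && !reserved_candidate)
            return true;
        return false;
    }

With the new QUEUE_INITIAL_SIZE of 256, an ordinary event is dropped once 191
events are pending (191 + 1 >= 256 - 64), whereas a release event is only
dropped once the queue already holds 255 entries.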

diff --git a/mi/mieq.c b/mi/mieq.c
index b79d0b2..764abe5 100644
--- a/mi/mieq.c
+++ b/mi/mieq.c
@@ -59,7 +59,8 @@ in this Software without prior written authorization from The Open Group.
 # include <X11/extensions/dpmsconst.h>
 #endif
 
-#define QUEUE_INITIAL_SIZE                 128
+#define QUEUE_INITIAL_SIZE                 256
+#define QUEUE_RESERVED_SIZE                 64
 #define QUEUE_MAXIMUM_SIZE                4096
 #define QUEUE_DROP_BACKTRACE_FREQUENCY     100
 #define QUEUE_DROP_BACKTRACE_MAX            10
@@ -208,6 +209,26 @@ mieqFini(void)
     free(miEventQueue.events);
 }
 
+/* This function will determine if the given event is allowed to use the reserved
+ * queue space.
+ */
+static Bool
+mieqReservedCandidate(InternalEvent *e) {
+    switch(e->any.type) {
+        case ET_KeyRelease:
+        case ET_ButtonRelease:
+#if XFreeXDGA
+        case ET_DGAEvent:
+#endif
+        case ET_RawKeyRelease:
+        case ET_RawButtonRelease:
+        case ET_XQuartz:
+            return TRUE;
+        default:
+            return FALSE;
+    }
+}
+
 /*
  * Must be reentrant with ProcessInputEvents.  Assumption: mieqEnqueue
  * will never be interrupted.  If this is called from both signal
@@ -223,6 +244,7 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
     int                    isMotion = 0;
     int                    evlen;
     Time                   time;
+    size_t                 n_enqueued;
 
 #ifdef XQUARTZ
     wait_for_server_init();
@@ -245,6 +267,8 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
 
     verify_internal_event(e);
 
+    n_enqueued = mieqNumEnqueued(&miEventQueue);
+
     /* avoid merging events from different devices */
     if (e->any.type == ET_Motion)
         isMotion = pDev->id;
@@ -252,7 +276,8 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
     if (isMotion && isMotion == miEventQueue.lastMotion &&
         oldtail != miEventQueue.head) {
         oldtail = (oldtail - 1) % miEventQueue.nevents;
-    } else if (((oldtail + 1) % miEventQueue.nevents) == miEventQueue.head) {
+    } else if ((n_enqueued + 1 == miEventQueue.nevents) ||
+               ((n_enqueued + 1 >= miEventQueue.nevents - QUEUE_RESERVED_SIZE) && !mieqReservedCandidate(e))) {
         /* Toss events which come in late.  Usually this means your server's
          * stuck in an infinite loop somewhere, but SIGIO is still getting
          * handled.
@@ -535,7 +560,7 @@ mieqProcessInputEvents(void)
 #endif
 
     n_enqueued = mieqNumEnqueued(&miEventQueue);
-    if (n_enqueued >= (miEventQueue.nevents >> 1) &&
+    if (n_enqueued >= (miEventQueue.nevents - (QUEUE_RESERVED_SIZE << 1)) &&
         miEventQueue.nevents < QUEUE_MAXIMUM_SIZE) {
         ErrorF("[mi] Increasing EQ size to %lu to prevent dropped events.\n", miEventQueue.nevents << 1);
         if (!mieqGrowQueue(&miEventQueue, miEventQueue.nevents << 1)) {
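
The last hunk above also moves the automatic-growth trigger from the half-full
mark to "within two reserved blocks of full": with the new 256-entry initial
queue both thresholds work out to 128 enqueued events, but for a 2048-entry
queue the old test fired at 1024 pending events while the new one waits until
1920 (2048 - 2 * 64).
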
commit cb73e230f0d121ac620d0cec0272032a29d2ca04
Author: Jeremy Huddleston <jeremyhu at apple.com>
Date:   Sat Oct 15 22:51:30 2011 -0700

    mieq: Provide better adaptability and diagnostics during mieq overflow
    
    This patch replaces the static-length event queue (512 entries) with one
    that starts at 128 entries and grows up to 4096 as it overflows, logging
    each time it grows.
    
    This change also allows for multiple backtraces to be printed when the
    server is wedged rather than just one.  This increased sampling should
    help identify the true hog in cases where one backtrace might be
    insufficient.
    
    Signed-off-by: Jeremy Huddleston <jeremyhu at apple.com>
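
In concrete terms for the code below: starting from QUEUE_INITIAL_SIZE (128),
five doublings (128 -> 256 -> 512 -> 1024 -> 2048 -> 4096) reach
QUEUE_MAXIMUM_SIZE, and each growth step is logged.  Overflow diagnostics are
throttled as well: a backtrace is printed for the first dropped event and then
for every QUEUE_DROP_BACKTRACE_FREQUENCY-th (100th) drop, up to
QUEUE_DROP_BACKTRACE_MAX (10) periodic reports, i.e. at drops 1, 100, 200,
..., 1000; after that, only the "processing has resumed" message reports how
large the clog was.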

diff --git a/mi/mieq.c b/mi/mieq.c
index b75bde9..b79d0b2 100644
--- a/mi/mieq.c
+++ b/mi/mieq.c
@@ -59,7 +59,10 @@ in this Software without prior written authorization from The Open Group.
 # include <X11/extensions/dpmsconst.h>
 #endif
 
-#define QUEUE_SIZE  512
+#define QUEUE_INITIAL_SIZE                 128
+#define QUEUE_MAXIMUM_SIZE                4096
+#define QUEUE_DROP_BACKTRACE_FREQUENCY     100
+#define QUEUE_DROP_BACKTRACE_MAX            10
 
 #define EnqueueScreen(dev) dev->spriteInfo->sprite->pEnqueueScreen
 #define DequeueScreen(dev) dev->spriteInfo->sprite->pDequeueScreen
@@ -74,7 +77,9 @@ typedef struct _EventQueue {
     HWEventQueueType head, tail;         /* long for SetInputCheck */
     CARD32           lastEventTime;      /* to avoid time running backwards */
     int              lastMotion;         /* device ID if last event motion? */
-    EventRec         events[QUEUE_SIZE]; /* static allocation for signals */
+    EventRec         *events;            /* our queue as an array */
+    size_t           nevents;            /* the number of buckets in our queue */
+    size_t           dropped;            /* counter for number of consecutive dropped events */
     mieqHandler      handlers[128];      /* custom event handler */
 } EventQueueRec, *EventQueuePtr;
 
@@ -99,25 +104,91 @@ static inline void wait_for_server_init(void) {
 }
 #endif
 
+static size_t mieqNumEnqueued(EventQueuePtr eventQueue) {
+    size_t n_enqueued = 0;
+    if (eventQueue->nevents) {
+        /* % is not well-defined with negative numbers... sigh */
+        n_enqueued = miEventQueue.tail - miEventQueue.head + eventQueue->nevents;
+        if (n_enqueued >= eventQueue->nevents)
+            n_enqueued -= eventQueue->nevents;
+    }
+    return n_enqueued;
+}
+
+/* Pre-condition: Called with miEventQueueMutex held */
+static Bool
+mieqGrowQueue(EventQueuePtr eventQueue, size_t new_nevents) {
+    size_t i, n_enqueued, first_hunk;
+    EventRec *new_events;
+
+    if (!eventQueue) {
+        ErrorF("[mi] mieqGrowQueue called with a NULL eventQueue\n");
+        return FALSE;
+    }
+
+    if (new_nevents <= eventQueue->nevents)
+        return FALSE;
+
+    if (CountBits((const uint8_t *)&new_nevents, sizeof(new_nevents)) != 1) {
+        ErrorF("[mi] mieqGrowQueue called with an invalid size: %lu\n", new_nevents);
+        return FALSE;
+    }
+
+    new_events = calloc(new_nevents, sizeof(EventRec));
+    if (new_events == NULL) {
+    	ErrorF("[mi] mieqGrowQueue memory allocation error.\n");
+        return FALSE;
+    }
+
+    n_enqueued = mieqNumEnqueued(eventQueue);
+
+    /* We block signals, so an mieqEnqueue triggered by SIGIO does not
+     * write to our queue as we are modifying it.
+     */
+    OsBlockSignals();
+
+    /* First copy the existing events */
+    first_hunk = eventQueue->nevents - eventQueue->head;
+    memcpy(new_events,
+           &eventQueue->events[miEventQueue.head],
+           first_hunk * sizeof(EventRec));
+    memcpy(&new_events[first_hunk],
+           eventQueue->events,
+           (eventQueue->head) * sizeof(EventRec));
+
+    /* Initialize the new portion */
+    for (i = eventQueue->nevents; i < new_nevents; i++) {
+        InternalEvent* evlist = InitEventList(1);
+        if (!evlist) {
+            size_t j;
+            for (j = eventQueue->nevents; j < i; j++)
+                FreeEventList(new_events[j].events, 1);
+            free(new_events);
+            OsReleaseSignals();
+            return FALSE;
+        }
+        new_events[i].events = evlist;
+    }
+
+    /* And update our record */
+    miEventQueue.tail = n_enqueued;
+    miEventQueue.head = 0;
+    miEventQueue.nevents = new_nevents;
+    free(miEventQueue.events);
+    miEventQueue.events = new_events;
+
+    OsReleaseSignals();
+    return TRUE;
+}
+
 Bool
 mieqInit(void)
 {
-    int i;
-
-    miEventQueue.head = miEventQueue.tail = 0;
+    memset(&miEventQueue, 0, sizeof(miEventQueue));
     miEventQueue.lastEventTime = GetTimeInMillis ();
-    miEventQueue.lastMotion = FALSE;
-    for (i = 0; i < 128; i++)
-        miEventQueue.handlers[i] = NULL;
-    for (i = 0; i < QUEUE_SIZE; i++)
-    {
-	if (miEventQueue.events[i].events == NULL) {
-	    InternalEvent* evlist = InitEventList(1);
-	    if (!evlist)
-		FatalError("Could not allocate event queue.\n");
-	    miEventQueue.events[i].events = evlist;
-	}
-    }
+
+    if (!mieqGrowQueue(&miEventQueue, QUEUE_INITIAL_SIZE))
+	FatalError("Could not allocate event queue.\n");
 
     SetInputCheck(&miEventQueue.head, &miEventQueue.tail);
     return TRUE;
@@ -127,13 +198,14 @@ void
 mieqFini(void)
 {
     int i;
-    for (i = 0; i < QUEUE_SIZE; i++)
+    for (i = 0; i < miEventQueue.nevents; i++)
     {
 	if (miEventQueue.events[i].events != NULL) {
 	    FreeEventList(miEventQueue.events[i].events, 1);
 	    miEventQueue.events[i].events = NULL;
 	}
     }
+    free(miEventQueue.events);
 }
 
 /*
@@ -157,6 +229,20 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
     pthread_mutex_lock(&miEventQueueMutex);
 #endif
 
+    if (!miEventQueue.nevents) {
+        /* The event queue is being resized due to previous overflow.
+         * This event will be dropped on the floor.
+         */
+        if (!miEventQueue.dropped) {
+            ErrorF("[mi] Event discarded during EQ resize.\n");
+        }
+        miEventQueue.dropped++;
+#ifdef XQUARTZ
+        pthread_mutex_unlock(&miEventQueueMutex);
+#endif
+        return;
+    }
+
     verify_internal_event(e);
 
     /* avoid merging events from different devices */
@@ -165,26 +251,31 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
 
     if (isMotion && isMotion == miEventQueue.lastMotion &&
         oldtail != miEventQueue.head) {
-        oldtail = (oldtail - 1) % QUEUE_SIZE;
-    }
-    else {
-        static int stuck = 0;
+        oldtail = (oldtail - 1) % miEventQueue.nevents;
+    } else if (((oldtail + 1) % miEventQueue.nevents) == miEventQueue.head) {
         /* Toss events which come in late.  Usually this means your server's
          * stuck in an infinite loop somewhere, but SIGIO is still getting
-         * handled. */
-        if (((oldtail + 1) % QUEUE_SIZE) == miEventQueue.head) {
-            if (!stuck) {
-                ErrorF("[mi] EQ overflowing. The server is probably stuck "
-                        "in an infinite loop.\n");
-                xorg_backtrace();
-                stuck = 1;
+         * handled.
+         */
+        miEventQueue.dropped++;
+        if (miEventQueue.dropped == 1) {
+            ErrorF("[mi] EQ overflowing.  Additional events will be discarded until existing events are processed.\n");
+            xorg_backtrace();
+            ErrorF("[mi] These backtraces from mieqEnqueue may point to a culprit higher up the stack.\n");
+            ErrorF("[mi] mieq is *NOT* the cause.  It is a victim.\n");
+        } else if (miEventQueue.dropped %  QUEUE_DROP_BACKTRACE_FREQUENCY == 0 &&
+                   miEventQueue.dropped /  QUEUE_DROP_BACKTRACE_FREQUENCY <= QUEUE_DROP_BACKTRACE_MAX) {
+            ErrorF("[mi] EQ overflow continuing.  %lu events have been dropped.\n", miEventQueue.dropped);
+            if (miEventQueue.dropped /  QUEUE_DROP_BACKTRACE_FREQUENCY == QUEUE_DROP_BACKTRACE_MAX) {
+                ErrorF("[mi] No further overflow reports will be logged until the clog is cleared.\n");
             }
+            xorg_backtrace();
+        }
+
 #ifdef XQUARTZ
-            pthread_mutex_unlock(&miEventQueueMutex);
+        pthread_mutex_unlock(&miEventQueueMutex);
 #endif
-	        return;
-        }
-        stuck = 0;
+        return;
     }
 
     evlen = e->any.length;
@@ -203,7 +294,7 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
     miEventQueue.events[oldtail].pDev = pDev;
 
     miEventQueue.lastMotion = isMotion;
-    miEventQueue.tail = (oldtail + 1) % QUEUE_SIZE;
+    miEventQueue.tail = (oldtail + 1) % miEventQueue.nevents;
 #ifdef XQUARTZ
     pthread_mutex_unlock(&miEventQueueMutex);
 #endif
@@ -437,11 +528,27 @@ mieqProcessInputEvents(void)
     static InternalEvent event;
     DeviceIntPtr dev = NULL,
                  master = NULL;
+    size_t n_enqueued;
 
 #ifdef XQUARTZ
     pthread_mutex_lock(&miEventQueueMutex);
 #endif
-    
+
+    n_enqueued = mieqNumEnqueued(&miEventQueue);
+    if (n_enqueued >= (miEventQueue.nevents >> 1) &&
+        miEventQueue.nevents < QUEUE_MAXIMUM_SIZE) {
+        ErrorF("[mi] Increasing EQ size to %lu to prevent dropped events.\n", miEventQueue.nevents << 1);
+        if (!mieqGrowQueue(&miEventQueue, miEventQueue.nevents << 1)) {
+            ErrorF("[mi] Increasing the size of EQ failed.\n");
+        }
+    }
+
+    if (miEventQueue.dropped) {
+        ErrorF("[mi] EQ processing has resumed after %lu dropped events.\n", miEventQueue.dropped);
+        ErrorF("[mi] This may be caused by a misbehaving driver monopolizing the server's resources.\n");
+        miEventQueue.dropped = 0;
+    }
+
     while (miEventQueue.head != miEventQueue.tail) {
         e = &miEventQueue.events[miEventQueue.head];
 
@@ -449,7 +556,7 @@ mieqProcessInputEvents(void)
         dev     = e->pDev;
         screen  = e->pScreen;
 
-        miEventQueue.head = (miEventQueue.head + 1) % QUEUE_SIZE;
+        miEventQueue.head = (miEventQueue.head + 1) % miEventQueue.nevents;
 
 #ifdef XQUARTZ
         pthread_mutex_unlock(&miEventQueueMutex);
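
A worked example of the ring arithmetic used by mieqNumEnqueued() above (the
"% is not well-defined with negative numbers" case): nevents is added before
reducing so the intermediate value never goes negative.  With nevents = 256,
head = 250 and tail = 10, the sum 10 - 250 + 256 = 16 is already below
nevents, so 16 events are pending; with head = 10 and tail = 250, the sum
250 - 10 + 256 = 496 is reduced by 256, giving 240.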


More information about the Xquartz-changes mailing list