[PATCH 2/2 v4] mieq: Reserve some space in EQ for release and other special events

Jeremy Huddleston jeremyhu at apple.com
Sun Oct 16 21:16:13 PDT 2011


The last 64 slots in the event queue (QUEUE_RESERVED_SIZE) are reserved
for release and other special events in order to help return the system
to a cleaner state when it comes back from a soft wedge (a server that
is stuck, e.g. in an infinite loop, but is still servicing SIGIO).

Signed-off-by: Jeremy Huddleston <jeremyhu at apple.com>
---
 mi/mieq.c |   50 +++++++++++++++++++++++++++++++++++++++-----------
 1 files changed, 39 insertions(+), 11 deletions(-)
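
For anyone reviewing the queue arithmetic, here is a small standalone
sketch (illustrative only, not part of the patch; the struct and helper
names are made up, and the constants mirror the ones defined in mieq.c)
of how the occupancy count and the reserved-slot admission check behave:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define QUEUE_NEVENTS        256   /* current queue size (power of 2)    */
#define QUEUE_RESERVED_SIZE   64   /* tail slots kept for release events */

struct queue {
    size_t head;     /* index of the next event to dequeue */
    size_t tail;     /* index of the next free slot        */
    size_t nevents;  /* total capacity                     */
};

/* Number of events currently enqueued.  tail - head can "go negative"
 * when the queue has wrapped, so add nevents first and fold the result
 * back into range instead of relying on % with negative operands.
 */
static size_t num_enqueued(const struct queue *q)
{
    size_t n = 0;

    if (q->nevents) {
        n = q->tail - q->head + q->nevents;
        if (n >= q->nevents)
            n -= q->nevents;
    }
    return n;
}

/* Admission policy: ordinary events are dropped once only the reserved
 * slots remain; release-type events may fill the queue completely
 * (minus the one slot that distinguishes full from empty).
 */
static bool can_enqueue(const struct queue *q, bool is_release_like)
{
    size_t n = num_enqueued(q);

    if (n + 1 == q->nevents)
        return false;          /* completely full */
    if (n + 1 >= q->nevents - QUEUE_RESERVED_SIZE && !is_release_like)
        return false;          /* only reserved space left */
    return true;
}

int main(void)
{
    struct queue q = { .head = 10, .tail = 5, .nevents = QUEUE_NEVENTS };

    printf("enqueued:   %zu\n", num_enqueued(&q));       /* 251 (wrapped) */
    printf("press ok:   %d\n",  can_enqueue(&q, false)); /* 0: reserved   */
    printf("release ok: %d\n",  can_enqueue(&q, true));  /* 1: allowed    */
    return 0;
}

The one-slot gap in the "completely full" check is what distinguishes a
full ring from an empty one, which is why ordinary events stop being
admitted QUEUE_RESERVED_SIZE slots early while release-type events can
run the queue up to nevents - 1.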

diff --git a/mi/mieq.c b/mi/mieq.c
index 541ff5e..71b5921 100644
--- a/mi/mieq.c
+++ b/mi/mieq.c
@@ -60,7 +60,8 @@ in this Software without prior written authorization from The Open Group.
 #endif
 
 /* Queue size must be a power of 2 */
-#define QUEUE_INITIAL_SIZE                 128
+#define QUEUE_INITIAL_SIZE                 256
+#define QUEUE_RESERVED_SIZE                 64
 #define QUEUE_MAXIMUM_SIZE                4096
 #define QUEUE_DROP_BACKTRACE_FREQUENCY     100
 #define QUEUE_DROP_BACKTRACE_MAX            10
@@ -105,6 +106,17 @@ static inline void wait_for_server_init(void) {
 }
 #endif
 
+static size_t mieqNumEnqueued(EventQueuePtr eventQueue) {
+    size_t n_enqueued = 0;
+    if (eventQueue->nevents) {
+        /* % is not well-defined with negative numbers... sigh */
+        n_enqueued = eventQueue->tail - eventQueue->head + eventQueue->nevents;
+        if (n_enqueued >= eventQueue->nevents)
+            n_enqueued -= eventQueue->nevents;
+    }
+    return n_enqueued;
+}
+
 /* Pre-condition: Called with miEventQueueMutex held */
 static Bool
 mieqGrowQueue(EventQueuePtr eventQueue, size_t new_nevents) {
@@ -130,15 +142,7 @@ mieqGrowQueue(EventQueuePtr eventQueue, size_t new_nevents) {
         return FALSE;
     }
 
-    
-    if (eventQueue->nevents) {
-    	/* % is not well-defined with negative numbers... sigh */
-        n_enqueued = miEventQueue.tail - miEventQueue.head + eventQueue->nevents;
-        if (n_enqueued >= eventQueue->nevents)
-            n_enqueued -= eventQueue->nevents;
-    } else { 
-        n_enqueued = 0;
-    }
+    n_enqueued = mieqNumEnqueued(eventQueue);
 
     /* We block signals, so SIGIO doesn't trigger mieqEnqueue to write to the
      * queue as we're modifying it.
@@ -200,6 +204,26 @@ mieqFini(void)
     free(miEventQueue.events);
 }
 
+/* Determine whether the given event is allowed to use the reserved
+ * queue space.
+ */
+static Bool
+mieqReservedCandidate(InternalEvent *e) {
+    switch(e->any.type) {
+        case ET_KeyRelease:
+        case ET_ButtonRelease:
+#if XFreeXDGA
+        case ET_DGAEvent:
+#endif
+        case ET_RawKeyRelease:
+        case ET_RawButtonRelease:
+        case ET_XQuartz:
+            return TRUE;
+        default:
+            return FALSE;
+    }
+}
+
 /*
  * Must be reentrant with ProcessInputEvents.  Assumption: mieqEnqueue
  * will never be interrupted.  If this is called from both signal
@@ -215,6 +239,7 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
     int                    isMotion = 0;
     int                    evlen;
     Time                   time;
+    size_t                 n_enqueued;
 
 #ifdef XQUARTZ
     wait_for_server_init();
@@ -237,6 +262,8 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
 
     verify_internal_event(e);
 
+    n_enqueued = mieqNumEnqueued(&miEventQueue);
+
     /* avoid merging events from different devices */
     if (e->any.type == ET_Motion)
         isMotion = pDev->id;
@@ -244,7 +271,8 @@ mieqEnqueue(DeviceIntPtr pDev, InternalEvent *e)
     if (isMotion && isMotion == miEventQueue.lastMotion &&
         oldtail != miEventQueue.head) {
         oldtail = (oldtail - 1) % miEventQueue.nevents;
-    } else if (((oldtail + 1) % miEventQueue.nevents) == miEventQueue.head) {
+    } else if ((n_enqueued + 1 == miEventQueue.nevents) ||
+               ((n_enqueued + 1 >= miEventQueue.nevents - QUEUE_RESERVED_SIZE) && !mieqReservedCandidate(e))) {
         /* Toss events which come in late.  Usually this means your server's
          * stuck in an infinite loop somewhere, but SIGIO is still getting
          * handled.
-- 
1.7.6.1