xserver: Branch 'server-1.14-branch' - 5 commits

Matt Dew marcoz at kemper.freedesktop.org
Tue Jul 16 09:20:47 PDT 2013


 dix/dispatch.c                      |    2 --
 dix/main.c                          |    3 +++
 dix/pixmap.c                        |    2 ++
 fb/fbpixmap.c                       |    1 +
 hw/xfree86/common/xf86platformBus.c |    8 ++++++++
 hw/xfree86/modes/xf86Crtc.c         |   34 ++++++++++++++++++++++++----------
 6 files changed, 38 insertions(+), 12 deletions(-)

New commits:
commit 54b125d1751385bcfc3c1b51d21ee6a7dc14143b
Author: Aaron Plattner <aplattner at nvidia.com>
Date:   Tue Apr 30 14:30:18 2013 -0700

    xfree86: don't enable anything in xf86InitialConfiguration for GPU screens
    
    There's no point in turning on outputs connected to GPU screens during initial
    configuration.  Not only does this cause them to just display black, it also
    confuses clients when these screens are attached to a master screen and RandR
    reports that the outputs are already on.
    
    Also, don't print the warning about no outputs being found on GPU screens,
    since that's expected.
    
    Signed-off-by: Aaron Plattner <aplattner at nvidia.com>
    Reviewed-by: Dave Airlie <airlied at gmail.com>
    (cherry picked from commit dbfeaf70623a83e1a3f3255c94d52e0e04702837)

diff --git a/hw/xfree86/modes/xf86Crtc.c b/hw/xfree86/modes/xf86Crtc.c
index 5a07793..989595f 100644
--- a/hw/xfree86/modes/xf86Crtc.c
+++ b/hw/xfree86/modes/xf86Crtc.c
@@ -1908,6 +1908,14 @@ xf86CollectEnabledOutputs(ScrnInfoPtr scrn, xf86CrtcConfigPtr config,
     Bool any_enabled = FALSE;
     int o;
 
+    /*
+     * Don't bother enabling outputs on GPU screens: a client needs to attach
+     * it to a source provider before setting a mode that scans out a shared
+     * pixmap.
+     */
+    if (scrn->is_gpu)
+        return FALSE;
+
     for (o = 0; o < config->num_output; o++)
         any_enabled |= enabled[o] = xf86OutputEnabled(config->output[o], TRUE);
 
@@ -2377,9 +2385,11 @@ xf86InitialConfiguration(ScrnInfoPtr scrn, Bool canGrow)
 
     ret = xf86CollectEnabledOutputs(scrn, config, enabled);
     if (ret == FALSE && canGrow) {
-        xf86DrvMsg(i, X_WARNING,
-                   "Unable to find connected outputs - setting %dx%d initial framebuffer\n",
-                   NO_OUTPUT_DEFAULT_WIDTH, NO_OUTPUT_DEFAULT_HEIGHT);
+        if (!scrn->is_gpu)
+            xf86DrvMsg(i, X_WARNING,
+		       "Unable to find connected outputs - setting %dx%d "
+                       "initial framebuffer\n",
+                       NO_OUTPUT_DEFAULT_WIDTH, NO_OUTPUT_DEFAULT_HEIGHT);
         have_outputs = FALSE;
     }
     else {
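
Read together, the two hunks make initial configuration a no-op for GPU
screens: the collection step reports no enabled outputs, and the caller
suppresses the "no outputs" warning. A simplified sketch of the
collection function with the guard applied, reconstructed from the hunk
above (the name is suffixed to mark it as a sketch; anything past the
visible diff context, including the trailing return, is assumed):

static Bool
xf86CollectEnabledOutputsSketch(ScrnInfoPtr scrn, xf86CrtcConfigPtr config,
                                Bool *enabled)
{
    Bool any_enabled = FALSE;
    int o;

    /* GPU screens stay off until a client attaches them to a source
     * provider and sets a mode that scans out a shared pixmap. */
    if (scrn->is_gpu)
        return FALSE;

    /* Probe every output and remember which ones are usable. */
    for (o = 0; o < config->num_output; o++)
        any_enabled |= enabled[o] = xf86OutputEnabled(config->output[o], TRUE);

    return any_enabled;
}
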
commit 29545a422bbdd11fda5cb61f27720332d68d0c36
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Jan 9 12:52:13 2013 +1000

    gpu: call CreateScreenResources for GPU screens
    
    I didn't think we needed this before, but after doing some more
    work on reverse optimus it seems it should be called.

    
    Reviewed-by: Keith Packard <keithp at keithp.com>
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    (cherry picked from commit f2fd8ec3725a61abbc831f0a9ec28fa2b7020c47)

diff --git a/dix/main.c b/dix/main.c
index fb935c9..e558d70 100644
--- a/dix/main.c
+++ b/dix/main.c
@@ -211,6 +211,9 @@ main(int argc, char *argv[], char *envp[])
             ScreenPtr pScreen = screenInfo.gpuscreens[i];
             if (!CreateScratchPixmapsForScreen(pScreen))
                 FatalError("failed to create scratch pixmaps");
+            if (pScreen->CreateScreenResources &&
+                !(*pScreen->CreateScreenResources) (pScreen))
+                FatalError("failed to create screen resources");
         }
 
         for (i = 0; i < screenInfo.numScreens; i++) {
diff --git a/hw/xfree86/common/xf86platformBus.c b/hw/xfree86/common/xf86platformBus.c
index 9034dad..db831a8 100644
--- a/hw/xfree86/common/xf86platformBus.c
+++ b/hw/xfree86/common/xf86platformBus.c
@@ -454,6 +454,14 @@ xf86platformAddDevice(int index)
 
    CreateScratchPixmapsForScreen(xf86GPUScreens[i]->pScreen);
 
+   if (xf86GPUScreens[i]->pScreen->CreateScreenResources &&
+       !(*xf86GPUScreens[i]->pScreen->CreateScreenResources) (xf86GPUScreens[i]->pScreen)) {
+       RemoveGPUScreen(xf86GPUScreens[i]->pScreen);
+       xf86DeleteScreen(xf86GPUScreens[i]);
+       xf86UnclaimPlatformSlot(&xf86_platform_devices[index], NULL);
+       xf86NumGPUScreens = old_screens;
+       return -1;
+   }
    /* attach unbound to 0 protocol screen */
    AttachUnboundGPU(xf86Screens[0]->pScreen, xf86GPUScreens[i]->pScreen);
 
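
Both hunks apply the same pattern: once the scratch pixmaps for a GPU
screen exist, call the driver's optional CreateScreenResources hook and
treat a FALSE return as failure. A hedged sketch of that shared pattern
(the helper name is hypothetical; the real call sites differ in error
handling, FatalError at startup versus tearing the hotplugged screen
back down as shown above):

static Bool
gpu_screen_create_resources(ScreenPtr pScreen)
{
    /* Scratch pixmaps must exist before the driver allocates its
     * per-screen resources. */
    if (!CreateScratchPixmapsForScreen(pScreen))
        return FALSE;

    /* The hook is optional; only call it if the driver installed one,
     * and propagate its failure to the caller. */
    if (pScreen->CreateScreenResources &&
        !(*pScreen->CreateScreenResources) (pScreen))
        return FALSE;

    return TRUE;
}
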
commit f21cc327a56e3b453cf0dba04457223d61c27ea6
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Jan 9 12:52:08 2013 +1000

    dix: allow pixmap dirty helper to be used for non-shared pixmaps
    
    This allows the pixmap dirty helper to be used for reverse optimus,
    where the GPU wants to copy from the shared pixmap to its VRAM copy.
    
    [airlied: slave_dst is the wrong name now, but renaming it would be
    pointless ABI churn at this point]
    Reviewed-by: Keith Packard <keithp at keithp.com>
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    (cherry picked from commit 8fcb9d91b69abc72ddef31b9f2e8585580c6cad2)

diff --git a/dix/pixmap.c b/dix/pixmap.c
index 2418812..fe92147 100644
--- a/dix/pixmap.c
+++ b/dix/pixmap.c
@@ -243,6 +243,8 @@ Bool PixmapSyncDirtyHelper(PixmapDirtyUpdatePtr dirty, RegionPtr dirty_region)
     }
 
     dst = dirty->slave_dst->master_pixmap;
+    if (!dst)
+        dst = dirty->slave_dst;
 
     RegionTranslate(dirty_region, -dirty->x, -dirty->y);
     n = RegionNumRects(dirty_region);
diff --git a/fb/fbpixmap.c b/fb/fbpixmap.c
index fbcdca9..0824b64 100644
--- a/fb/fbpixmap.c
+++ b/fb/fbpixmap.c
@@ -67,6 +67,7 @@ fbCreatePixmapBpp(ScreenPtr pScreen, int width, int height, int depth, int bpp,
     pPixmap->devKind = paddedWidth;
     pPixmap->refcnt = 1;
     pPixmap->devPrivate.ptr = (pointer) ((char *) pPixmap + base + adjust);
+    pPixmap->master_pixmap = NULL;
 
 #ifdef FB_DEBUG
     pPixmap->devPrivate.ptr =
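
The fb hunk is what makes the dix fallback well defined: ordinary
pixmaps now start life with master_pixmap explicitly NULL, so
PixmapSyncDirtyHelper can use that field to tell shared pixmaps from
non-shared ones. A small sketch of the resulting destination pick (the
helper name is hypothetical; the fields come from the hunks above):

static PixmapPtr
dirty_sync_destination(PixmapDirtyUpdatePtr dirty)
{
    /* A shared pixmap carries the master's backing pixmap; for a
     * non-shared pixmap the field is NULL and the copy lands in
     * slave_dst itself, e.g. the reverse-optimus VRAM copy. */
    PixmapPtr dst = dirty->slave_dst->master_pixmap;

    return dst ? dst : dirty->slave_dst;
}
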
commit d817284b01ad3bbe02c71d6b00af7526be77626b
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Jan 9 12:52:03 2013 +1000

    xf86crtc: don't use scrn->display for gpu screens
    
    scrn->display is really a property of the main screen, and we don't
    want GPU screens to use it for anything when picking modes or a
    front buffer size.
    
    This fixes a bug where plugging in a DisplayLink device would try to
    allocate a screen the same size as the currently running one
    (3360x1050 in this case), which was too big for the device. Avoid
    doing this and pick sizes based only on what's plugged into this
    device.
    
    Reviewed-by: Keith Packard <keithp at keithp.com>
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    (cherry picked from commit 16077b81c502e04d77f81f683e0c213b9fe75393)

diff --git a/hw/xfree86/modes/xf86Crtc.c b/hw/xfree86/modes/xf86Crtc.c
index 7d55f60..5a07793 100644
--- a/hw/xfree86/modes/xf86Crtc.c
+++ b/hw/xfree86/modes/xf86Crtc.c
@@ -2360,11 +2360,11 @@ xf86InitialConfiguration(ScrnInfoPtr scrn, Bool canGrow)
     config->debug_modes = xf86ReturnOptValBool(config->options,
                                                OPTION_MODEDEBUG, FALSE);
 
-    if (scrn->display->virtualX)
+    if (scrn->display->virtualX && !scrn->is_gpu)
         width = scrn->display->virtualX;
     else
         width = config->maxWidth;
-    if (scrn->display->virtualY)
+    if (scrn->display->virtualY && !scrn->is_gpu)
         height = scrn->display->virtualY;
     else
         height = config->maxHeight;
@@ -2428,8 +2428,10 @@ xf86InitialConfiguration(ScrnInfoPtr scrn, Bool canGrow)
 
     /* XXX override xf86 common frame computation code */
 
-    scrn->display->frameX0 = 0;
-    scrn->display->frameY0 = 0;
+    if (!scrn->is_gpu) {
+        scrn->display->frameX0 = 0;
+        scrn->display->frameY0 = 0;
+    }
 
     for (c = 0; c < config->num_crtc; c++) {
         xf86CrtcPtr crtc = config->crtc[c];
@@ -2477,7 +2479,7 @@ xf86InitialConfiguration(ScrnInfoPtr scrn, Bool canGrow)
         }
     }
 
-    if (scrn->display->virtualX == 0) {
+    if (scrn->display->virtualX == 0 || scrn->is_gpu) {
         /*
          * Expand virtual size to cover the current config and potential mode
          * switches, if the driver can't enlarge the screen later.
@@ -2492,8 +2494,10 @@ xf86InitialConfiguration(ScrnInfoPtr scrn, Bool canGrow)
             }
         }
 
-        scrn->display->virtualX = width;
-        scrn->display->virtualY = height;
+	if (!scrn->is_gpu) {
+            scrn->display->virtualX = width;
+            scrn->display->virtualY = height;
+	}
     }
 
     if (width > scrn->virtualX)
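
Condensed, the rule these hunks establish is that only the main screen
honours the user's configured Virtual size and frame origin; GPU
screens derive everything from the driver limits and what is actually
connected. A hedged sketch of the initial size selection (the helper
name is hypothetical, and the real code then grows or shrinks the
result to cover the actual CRTC layout):

static void
initial_fb_size(ScrnInfoPtr scrn, xf86CrtcConfigPtr config,
                int *width, int *height)
{
    /* GPU screens ignore scrn->display entirely. */
    if (scrn->display->virtualX && !scrn->is_gpu)
        *width = scrn->display->virtualX;
    else
        *width = config->maxWidth;

    if (scrn->display->virtualY && !scrn->is_gpu)
        *height = scrn->display->virtualY;
    else
        *height = config->maxHeight;
}
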
commit 803d0ac49900903915f1dcd2496f085e5c1afa22
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Jan 9 12:51:55 2013 +1000

    dix/gpu: remove asserts for output/offload from same slave
    
    We should have no problem allowing output/offload from the same
    slave; I asserted against it here, but for implementing reverse
    optimus this makes perfect sense (reverse optimus is Intel
    outputting to NVIDIA).
    
    Reviewed-by: Keith Packard <keithp at keithp.com>
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    (cherry picked from commit f0d0d75bfe62553dde353f89e46ff13dd863fbe8)

diff --git a/dix/dispatch.c b/dix/dispatch.c
index 8d61735..90b6c7c 100644
--- a/dix/dispatch.c
+++ b/dix/dispatch.c
@@ -3942,7 +3942,6 @@ void
 AttachOutputGPU(ScreenPtr pScreen, ScreenPtr new)
 {
     assert(new->isGPU);
-    assert(!new->current_master);
     xorg_list_add(&new->output_head, &pScreen->output_slave_list);
     new->current_master = pScreen;
 }
@@ -3959,7 +3958,6 @@ void
 AttachOffloadGPU(ScreenPtr pScreen, ScreenPtr new)
 {
     assert(new->isGPU);
-    assert(!new->current_master);
     xorg_list_add(&new->offload_head, &pScreen->offload_slave_list);
     new->current_master = pScreen;
 }
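
With the asserts gone, one GPU screen may be attached to the same
master as both an output slave and an offload slave, which is the
reverse-optimus topology. A sketch of the now-legal call sequence
(hypothetical wrapper; master and gpu are placeholder ScreenPtrs):

static void
attach_reverse_optimus(ScreenPtr master, ScreenPtr gpu)
{
    /* The first attachment sets gpu->current_master = master... */
    AttachOutputGPU(master, gpu);
    /* ...and the second no longer trips assert(!new->current_master). */
    AttachOffloadGPU(master, gpu);
}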