xf86-video-intel: 7 commits - src/common.h src/intel_display.c src/intel_driver.c src/intel_memory.c src/intel_module.c src/legacy/i810 src/sna/gen2_render.c src/sna/gen3_render.c src/sna/gen4_render.c src/sna/gen5_render.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_composite.c src/sna/sna_damage.c src/sna/sna_damage.h src/sna/sna_display.c src/sna/sna_dri.c src/sna/sna_glyphs.c src/sna/sna_gradient.c src/sna/sna.h src/sna/sna_io.c src/sna/sna_render.c src/sna/sna_render.h src/sna/sna_render_inline.h src/sna/sna_tiling.c src/sna/sna_transform.c src/sna/sna_trapezoids.c src/sna/sna_video.c src/sna/sna_video.h src/sna/sna_video_overlay.c src/sna/sna_video_sprite.c src/sna/sna_video_textured.c

Chris Wilson ickle at kemper.freedesktop.org
Sat Jul 14 04:21:24 PDT 2012


 src/common.h                  |  105 --------------
 src/intel_display.c           |    3 
 src/intel_driver.c            |    2 
 src/intel_memory.c            |    2 
 src/intel_module.c            |    4 
 src/legacy/i810/i810_driver.c |    2 
 src/sna/gen2_render.c         |  246 ++++++++++++++++-----------------
 src/sna/gen3_render.c         |  310 +++++++++++++++++++++---------------------
 src/sna/gen4_render.c         |  297 ++++++++++++++++++++--------------------
 src/sna/gen5_render.c         |  284 +++++++++++++++++++-------------------
 src/sna/gen6_render.c         |  304 ++++++++++++++++++++---------------------
 src/sna/gen7_render.c         |  300 ++++++++++++++++++++--------------------
 src/sna/kgem.c                |  140 +++++++++++++-----
 src/sna/kgem.h                |    6 
 src/sna/sna.h                 |   34 ++--
 src/sna/sna_accel.c           |  122 ++++++++--------
 src/sna/sna_blt.c             |  271 +++++++++++++++++++-----------------
 src/sna/sna_composite.c       |   16 +-
 src/sna/sna_damage.c          |   20 +-
 src/sna/sna_damage.h          |   10 -
 src/sna/sna_display.c         |   59 ++-----
 src/sna/sna_dri.c             |   71 ++++-----
 src/sna/sna_glyphs.c          |    5 
 src/sna/sna_gradient.c        |   26 +--
 src/sna/sna_io.c              |    2 
 src/sna/sna_render.c          |   20 +-
 src/sna/sna_render.h          |   78 +++++-----
 src/sna/sna_render_inline.h   |   22 +-
 src/sna/sna_tiling.c          |   38 ++---
 src/sna/sna_transform.c       |   22 +-
 src/sna/sna_trapezoids.c      |   26 +--
 src/sna/sna_video.c           |   26 +--
 src/sna/sna_video.h           |    6 
 src/sna/sna_video_overlay.c   |    8 -
 src/sna/sna_video_sprite.c    |   12 -
 src/sna/sna_video_textured.c  |   16 --
 36 files changed, 1435 insertions(+), 1480 deletions(-)

New commits:
commit 39845280b5a9d0ed60c6a158c3d0df9fb8756d40
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 12:14:28 2012 +0100

    sna: Allow BLT support on future unknown hardware
    
    Assume that if the kernel recognises the chipset and declares it has a
    BLT, we can use it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
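
For reference, each of the new test_has_*() capability probes boils down to a
single GETPARAM ioctl on the DRM fd. A minimal stand-alone sketch of that query
(assuming libdrm's drmIoctl() and the i915_drm.h definitions; query_param() is
a hypothetical name, not the driver's gem_param()):

    #include <string.h>
    #include <i915_drm.h>
    #include <xf86drm.h>

    static int query_param(int fd, int param)
    {
            drm_i915_getparam_t gp;
            int value = 0;

            memset(&gp, 0, sizeof(gp));
            gp.param = param;
            gp.value = &value;

            /* Old kernels fail the ioctl for parameters they do not know,
             * which is treated the same as "not supported". */
            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -1;

            return value;
    }

    /* e.g. has_blt = query_param(fd, I915_PARAM_HAS_BLT) > 0; */

With the result cached in kgem, an unknown chipset (gen == 0) is now accepted
whenever the kernel itself reports a BLT ring.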

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c528fe7..b44c734 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -599,7 +599,7 @@ static int gem_param(struct kgem *kgem, int name)
 	return v;
 }
 
-static bool semaphores_enabled(void)
+static bool test_has_semaphores_enabled(void)
 {
 	FILE *file;
 	bool detected = false;
@@ -632,6 +632,9 @@ static bool is_hw_supported(struct kgem *kgem,
 	if (DBG_NO_HW)
 		return false;
 
+	if (kgem->gen == 0) /* unknown chipset, assume future gen */
+		return kgem->has_blt;
+
 	if (kgem->gen <= 20) /* dynamic GTT is fubar */
 		return false;
 
@@ -641,11 +644,40 @@ static bool is_hw_supported(struct kgem *kgem,
 	}
 
 	if (kgem->gen >= 60) /* Only if the kernel supports the BLT ring */
-		return gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
+		return kgem->has_blt;
 
 	return true;
 }
 
+static bool test_has_relaxed_fencing(struct kgem *kgem)
+{
+	if (kgem->gen < 40) {
+		if (DBG_NO_RELAXED_FENCING)
+			return false;
+
+		return gem_param(kgem, I915_PARAM_HAS_RELAXED_FENCING) > 0;
+	} else
+		return true;
+}
+
+static bool test_has_llc(struct kgem *kgem)
+{
+	int has_llc = -1;
+
+	if (DBG_NO_LLC)
+		return false;
+
+#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
+	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
+#endif
+	if (has_llc == -1) {
+		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
+		has_llc = kgem->gen >= 60;
+	}
+
+	return has_llc;
+}
+
 static bool test_has_cache_level(struct kgem *kgem)
 {
 #if defined(USE_CACHE_LEVEL)
@@ -670,6 +702,21 @@ static bool test_has_cache_level(struct kgem *kgem)
 #endif
 }
 
+static bool test_has_vmap(struct kgem *kgem)
+{
+#if defined(USE_VMAP)
+	if (DBG_NO_VMAP)
+		return false;
+
+	if (kgem->gen == 40)
+		return false;
+
+	return gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
+#else
+	return false;
+#endif
+}
+
 static int kgem_get_screen_index(struct kgem *kgem)
 {
 	struct sna *sna = container_of(kgem, struct sna, kgem);
@@ -683,10 +730,44 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	unsigned half_gpu_max;
 	unsigned int i, j;
 
+	DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));
+
 	memset(kgem, 0, sizeof(*kgem));
 
 	kgem->fd = fd;
 	kgem->gen = gen;
+
+	kgem->has_blt = gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
+	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
+	     kgem->has_blt));
+
+	kgem->has_relaxed_delta =
+		gem_param(kgem, I915_PARAM_HAS_RELAXED_DELTA) > 0;
+	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
+	     kgem->has_relaxed_delta));
+
+	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
+	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
+	     kgem->has_relaxed_fencing));
+
+	kgem->has_llc = test_has_llc(kgem);
+	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
+	     kgem->has_llc));
+
+	kgem->has_cache_level = test_has_cache_level(kgem);
+	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
+	     kgem->has_cache_level));
+
+	kgem->has_vmap = test_has_vmap(kgem);
+	DBG(("%s: has vmap? %d\n", __FUNCTION__,
+	     kgem->has_vmap));
+
+	kgem->has_semaphores = false;
+	if (kgem->has_blt && test_has_semaphores_enabled())
+		kgem->has_semaphores = true;
+	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
+	     kgem->has_semaphores));
+
 	if (!is_hw_supported(kgem, dev)) {
 		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
 			   "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
@@ -697,11 +778,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->wedged = 1;
 	}
 
-	kgem->has_relaxed_delta =
-		gem_param(kgem, I915_PARAM_HAS_RELAXED_DELTA) > 0;
-	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
-	     kgem->has_relaxed_delta));
-
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
@@ -719,6 +795,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->min_alignment = 64;
 
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
+	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
+	     kgem->half_cpu_cache_pages));
 
 	list_init(&kgem->batch_partials);
 	list_init(&kgem->active_partials);
@@ -741,49 +819,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
-	kgem->has_cache_level = test_has_cache_level(kgem);
-	DBG(("%s: using set-cache-level=%d\n", __FUNCTION__, kgem->has_cache_level));
-
-#if defined(USE_VMAP)
-	if (!DBG_NO_VMAP)
-		kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
-	if (gen == 40)
-		kgem->has_vmap = false; /* sampler dies with snoopable memory */
-#endif
-	DBG(("%s: using vmap=%d\n", __FUNCTION__, kgem->has_vmap));
-
-	if (gen < 40) {
-		if (!DBG_NO_RELAXED_FENCING) {
-			kgem->has_relaxed_fencing =
-				gem_param(kgem, I915_PARAM_HAS_RELAXED_FENCING) > 0;
-		}
-	} else
-		kgem->has_relaxed_fencing = 1;
-	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
-	     kgem->has_relaxed_fencing));
-
-	kgem->has_llc = false;
-	if (!DBG_NO_LLC) {
-		int has_llc = -1;
-#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
-		has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
-#endif
-		if (has_llc == -1) {
-			DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
-			has_llc = gen >= 60;
-		}
-		kgem->has_llc = has_llc;
-	}
 	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, vmap? %d\n", __FUNCTION__,
 	     kgem->has_llc | kgem->has_vmap | kgem->has_cache_level,
 	     kgem->has_llc, kgem->has_cache_level, kgem->has_vmap));
 
-	kgem->has_semaphores = false;
-	if (gen >= 60 && semaphores_enabled())
-		kgem->has_semaphores = true;
-	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
-	     kgem->has_semaphores));
-
 	VG_CLEAR(aperture);
 	aperture.aper_size = 64*1024*1024;
 	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 7596326..e5db6fd 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -153,6 +153,7 @@ struct kgem {
 	uint32_t busy:1;
 
 	uint32_t has_vmap :1;
+	uint32_t has_blt :1;
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_relaxed_delta :1;
 	uint32_t has_semaphores :1;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 02727a3..dcfab91 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -283,7 +283,7 @@ void no_render_init(struct sna *sna)
 	sna->kgem.context_switch = no_render_context_switch;
 	sna->kgem.retire = no_render_retire;
 	sna->kgem.expire = no_render_expire;
-	if (sna->kgem.gen >= 60)
+	if (sna->kgem.has_blt)
 		sna->kgem.ring = KGEM_BLT;
 }
 
commit b260ca44b3aaba2c8cd25640ad7ac9ca6478b0f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 09:59:07 2012 +0100

    Drop some unused includes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/common.h b/src/common.h
index e3ab1f2..86e5b11 100644
--- a/src/common.h
+++ b/src/common.h
@@ -38,6 +38,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #ifndef _INTEL_COMMON_H_
 #define _INTEL_COMMON_H_
 
+#include <xf86.h>
+
 /* Provide substitutes for gcc's __FUNCTION__ on other compilers */
 #if !defined(__GNUC__) && !defined(__FUNCTION__)
 # if defined(__STDC__) && (__STDC_VERSION__>=199901L) /* C99 */
@@ -47,117 +49,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 # endif
 #endif
 
-
 #define PFX __FILE__,__LINE__,__FUNCTION__
 #define FUNCTION_NAME __FUNCTION__
 
 #define KB(x) ((x) * 1024)
 #define MB(x) ((x) * KB(1024))
 
-/* Using usleep() makes things noticably slow. */
-#if 0
-#define DELAY(x) usleep(x)
-#else
-#define DELAY(x) do {;} while (0)
-#endif
-
-/* I830 hooks for the I810 driver setup/probe. */
-extern const OptionInfoRec *I830AvailableOptions(int chipid, int busid);
 extern Bool intel_init_scrn(ScrnInfoPtr scrn);
 
-/* Symbol lists shared by the i810 and i830 parts. */
-extern int I830EntityIndex;
-
-#ifdef _I830_H_
-#define PrintErrorState i830_dump_error_state
-#define WaitRingFunc I830WaitLpRing
-#define RecPtr intel
-#else
-#define PrintErrorState I810PrintErrorState
-#define WaitRingFunc I810WaitLpRing
-#define RecPtr pI810
-#endif
-
-static inline void memset_volatile(volatile void *b, int c, size_t len)
-{
-    size_t i;
-
-    for (i = 0; i < len; i++)
-	((volatile char *)b)[i] = c;
-}
-
-static inline void memcpy_volatile(volatile void *dst, const void *src,
-				   size_t len)
-{
-    size_t i;
-
-    for (i = 0; i < len; i++)
-	((volatile char *)dst)[i] = ((const volatile char *)src)[i];
-}
-
-/* Memory mapped register access macros */
-#define INREG8(addr)        *(volatile uint8_t *)(RecPtr->MMIOBase + (addr))
-#define INREG16(addr)       *(volatile uint16_t *)(RecPtr->MMIOBase + (addr))
-#define INREG(addr)         *(volatile uint32_t *)(RecPtr->MMIOBase + (addr))
-#define INGTT(addr)         *(volatile uint32_t *)(RecPtr->GTTBase + (addr))
-#define POSTING_READ(addr)  (void)INREG(addr)
-
-#define OUTREG8(addr, val) do {						\
-   *(volatile uint8_t *)(RecPtr->MMIOBase  + (addr)) = (val);		\
-   if (I810_DEBUG&DEBUG_VERBOSE_OUTREG) {				\
-      ErrorF("OUTREG8(0x%lx, 0x%lx) in %s\n", (unsigned long)(addr),	\
-		(unsigned long)(val), FUNCTION_NAME);			\
-   }									\
-} while (0)
-
-#define OUTREG16(addr, val) do {					\
-   *(volatile uint16_t *)(RecPtr->MMIOBase + (addr)) = (val);		\
-   if (I810_DEBUG&DEBUG_VERBOSE_OUTREG) {				\
-      ErrorF("OUTREG16(0x%lx, 0x%lx) in %s\n", (unsigned long)(addr),	\
-		(unsigned long)(val), FUNCTION_NAME);			\
-   }									\
-} while (0)
-
-#define OUTREG(addr, val) do {						\
-   *(volatile uint32_t *)(RecPtr->MMIOBase + (addr)) = (val);		\
-   if (I810_DEBUG&DEBUG_VERBOSE_OUTREG) {				\
-      ErrorF("OUTREG(0x%lx, 0x%lx) in %s\n", (unsigned long)(addr),	\
-		(unsigned long)(val), FUNCTION_NAME);			\
-   }									\
-} while (0)
-
-
-#define DEBUG_VERBOSE_ACCEL  0x1
-#define DEBUG_VERBOSE_SYNC   0x2
-#define DEBUG_VERBOSE_VGA    0x4
-#define DEBUG_VERBOSE_RING   0x8
-#define DEBUG_VERBOSE_OUTREG 0x10
-#define DEBUG_VERBOSE_MEMORY 0x20
-#define DEBUG_VERBOSE_CURSOR 0x40
-#define DEBUG_ALWAYS_SYNC    0x80
-#define DEBUG_VERBOSE_DRI    0x100
-#define DEBUG_VERBOSE_BIOS   0x200
-
-/* Size of the mmio region.
- */
-#define I810_REG_SIZE 0x80000
-
-#define GTT_PAGE_SIZE			KB(4)
-#define PRIMARY_RINGBUFFER_SIZE		KB(128)
-#define MIN_SCRATCH_BUFFER_SIZE		KB(16)
-#define MAX_SCRATCH_BUFFER_SIZE		KB(64)
-#define HWCURSOR_SIZE			GTT_PAGE_SIZE
-#define HWCURSOR_SIZE_ARGB		GTT_PAGE_SIZE * 4
-
-/* Use a 64x64 HW cursor */
-#define I810_CURSOR_X			64
-#define I810_CURSOR_Y			I810_CURSOR_X
-
-#define PIPE_NAME(n)			('A' + (n))
-
-struct pci_device *
-intel_host_bridge (void);
-
 /**
  * Hints to CreatePixmap to tell the driver how the pixmap is going to be
  * used.
diff --git a/src/intel_display.c b/src/intel_display.c
index a974e34..0a80aa8 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -711,8 +711,7 @@ intel_crtc_init(ScrnInfoPtr scrn, struct intel_mode *mode, int num)
 							   crtc_id(intel_crtc));
 
 	intel_crtc->cursor = drm_intel_bo_alloc(intel->bufmgr, "ARGB cursor",
-						HWCURSOR_SIZE_ARGB,
-						GTT_PAGE_SIZE);
+						4*64*64, 4096);
 
 	intel_crtc->crtc = crtc;
 	list_add(&intel_crtc->link, &mode->crtcs);
diff --git a/src/intel_driver.c b/src/intel_driver.c
index f2770d6..78f7ce3 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -932,7 +932,7 @@ I830ScreenInit(SCREEN_INIT_ARGS_DECL)
 	miDCInitialize(screen, xf86GetPointerScreenFuncs());
 
 	xf86DrvMsg(scrn->scrnIndex, X_INFO, "Initializing HW Cursor\n");
-	if (!xf86_cursors_init(screen, I810_CURSOR_X, I810_CURSOR_Y,
+	if (!xf86_cursors_init(screen, 64, 64,
 			       (HARDWARE_CURSOR_TRUECOLOR_AT_8BPP |
 				HARDWARE_CURSOR_BIT_ORDER_MSBFIRST |
 				HARDWARE_CURSOR_INVERT_MASK |
diff --git a/src/intel_memory.c b/src/intel_memory.c
index bfc0e8c..bb7710f 100644
--- a/src/intel_memory.c
+++ b/src/intel_memory.c
@@ -96,7 +96,7 @@ unsigned long intel_get_fence_size(intel_screen_private *intel, unsigned long si
 
 	if (INTEL_INFO(intel)->gen >= 40 || intel->has_relaxed_fencing) {
 		/* The 965 can have fences at any page boundary. */
-		return ALIGN(size, GTT_PAGE_SIZE);
+		return ALIGN(size, 4096);
 	} else {
 		/* Align the size to a power of two greater than the smallest fence
 		 * size.
diff --git a/src/intel_module.c b/src/intel_module.c
index f8ba149..a39affb 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -28,9 +28,7 @@
 #include "config.h"
 #endif
 
-#include <xf86.h>
 #include <xf86_OSproc.h>
-#include <xf86cmap.h>
 #include <xf86Parser.h>
 #include <xf86drmMode.h>
 
@@ -159,10 +157,12 @@ SymTabRec *intel_chipsets = (SymTabRec *) _intel_chipsets;
     { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0, 0, (intptr_t)(i) }
 
 static const struct pci_id_match intel_device_match[] = {
+#if !KMS_ONLY
 	INTEL_DEVICE_MATCH (PCI_CHIP_I810, &intel_i81x_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I810_DC100, &intel_i81x_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I810_E, &intel_i81x_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I815, &intel_i81x_info ),
+#endif
 
 	INTEL_DEVICE_MATCH (PCI_CHIP_I830_M, &intel_i830_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_845_G, &intel_i845_info ),
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 6fc17bd..9821612 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -142,8 +142,6 @@ static int i810_pitches[] = {
 };
 #endif
 
-int I830EntityIndex = -1;
-
 /*
  * I810GetRec and I810FreeRec --
  *
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d7fa71b..03115c2 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -289,10 +289,6 @@ extern void sna_mode_wakeup(struct sna *sna);
 extern void sna_mode_redisplay(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
-extern int sna_crtc_id(xf86CrtcPtr crtc);
-extern bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc);
-extern int sna_output_dpms_status(xf86OutputPtr output);
-
 extern int sna_page_flip(struct sna *sna,
 			 struct kgem_bo *bo,
 			 void *data,
@@ -356,9 +352,9 @@ static inline void sna_dri_destroy_window(WindowPtr win) { }
 static inline void sna_dri_close(struct sna *sna, ScreenPtr pScreen) { }
 #endif
 
-extern bool sna_crtc_on(xf86CrtcPtr crtc);
-int sna_crtc_to_pipe(xf86CrtcPtr crtc);
-int sna_crtc_to_plane(xf86CrtcPtr crtc);
+extern int sna_crtc_to_pipe(xf86CrtcPtr crtc);
+extern int sna_crtc_to_plane(xf86CrtcPtr crtc);
+extern int sna_crtc_id(xf86CrtcPtr crtc);
 
 CARD32 sna_format_for_depth(int depth);
 CARD32 sna_render_format_for_depth(int depth);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 0928f6a..87a69ba 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -113,21 +113,14 @@ int sna_crtc_id(xf86CrtcPtr crtc)
 	return to_sna_crtc(crtc)->id;
 }
 
-bool sna_crtc_on(xf86CrtcPtr crtc)
-{
-	return to_sna_crtc(crtc)->bo != NULL;
-}
-
 int sna_crtc_to_pipe(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-	return sna_crtc->pipe;
+	return to_sna_crtc(crtc)->pipe;
 }
 
 int sna_crtc_to_plane(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-	return sna_crtc->plane;
+	return to_sna_crtc(crtc)->plane;
 }
 
 static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
@@ -465,7 +458,7 @@ mode_to_kmode(struct drm_mode_modeinfo *kmode, DisplayModePtr mode)
 	kmode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
 
-bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
+static bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 {
 	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_mode_crtc mode;
@@ -1676,13 +1669,6 @@ sna_output_dpms(xf86OutputPtr output, int dpms)
 	}
 }
 
-int
-sna_output_dpms_status(xf86OutputPtr output)
-{
-	struct sna_output *sna_output = output->driver_private;
-	return sna_output->dpms_mode;
-}
-
 static bool
 sna_property_ignore(drmModePropertyPtr prop)
 {
@@ -2579,7 +2565,7 @@ sna_wait_for_scanline(struct sna *sna,
 	int y1, y2, pipe;
 
 	assert(crtc);
-	assert(sna_crtc_on(crtc));
+	assert(to_sna_crtc(crtc)->bo != NULL);
 	assert(pixmap == sna->front);
 
 	/* XXX WAIT_EVENT is still causing hangs on SNB */
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6afeb51..5f237b0 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -553,7 +553,6 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 	if (flush) { /* STAT! */
-		assert(sna_crtc_is_bound(sna, crtc));
 		kgem_submit(&sna->kgem);
 		bo = kgem_get_last_request(&sna->kgem);
 	}
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 110bb00..2332ce2 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -295,10 +295,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	/* Push the frame to the GPU as soon as possible so
 	 * we can hit the next vsync.
 	 */
-	if (flush) {
-		assert(sna_crtc_is_bound(sna, crtc));
+	if (flush)
 		kgem_submit(&sna->kgem);
-	}
 
 	return ret;
 }
commit f517bdb12b909ef9d0897166bdabc537551a065b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 08:56:21 2012 +0100

    sna: Do not perform batch compaction on old kernels
    
    They will reject a relocation delta that points outside of the target batch.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
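
The limitation being worked around: kernels without I915_PARAM_HAS_RELAXED_DELTA
validate every relocation against the size of the target object, so a delta that
ends up beyond the end of a compacted (shrunken) batch buffer causes execbuffer
to be rejected. A conceptual sketch of that check (illustration only, not the
kernel's exact code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Without relaxed delta, the offset written into the batch must lie
     * inside the target bo; compaction can violate this. */
    static bool reloc_delta_ok(uint64_t delta, uint64_t target_size,
                               bool has_relaxed_delta)
    {
            if (has_relaxed_delta)
                    return true;    /* newer kernels skip the range check */

            return delta < target_size;
    }

Hence compaction is skipped (compact_batch_surface() returns kgem->batch_size)
and a smaller fixed batch_size is chosen when relaxed delta is unavailable.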

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e59811f..c528fe7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -697,12 +697,22 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->wedged = 1;
 	}
 
+	kgem->has_relaxed_delta =
+		gem_param(kgem, I915_PARAM_HAS_RELAXED_DELTA) > 0;
+	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
+	     kgem->has_relaxed_delta));
+
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
 		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
 	if (gen == 70)
 		kgem->batch_size = 16*1024;
+	if (!kgem->has_relaxed_delta)
+		kgem->batch_size = 4*1024;
+
+	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
+	     kgem->batch_size));
 
 	kgem->min_alignment = 4;
 	if (gen < 40)
@@ -1877,6 +1887,9 @@ static int compact_batch_surface(struct kgem *kgem)
 {
 	int size, shrink, n;
 
+	if (!kgem->has_relaxed_delta)
+		return kgem->batch_size;
+
 	/* See if we can pack the contents into one or two pages */
 	n = ALIGN(kgem->batch_size, 1024);
 	size = n - kgem->surface + kgem->nbatch;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index ba110b6..7596326 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -154,6 +154,7 @@ struct kgem {
 
 	uint32_t has_vmap :1;
 	uint32_t has_relaxed_fencing :1;
+	uint32_t has_relaxed_delta :1;
 	uint32_t has_semaphores :1;
 	uint32_t has_cache_level :1;
 	uint32_t has_llc :1;
commit ce69a1e8686889f1eebb4cb3a39c41f473e58b93
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 23:25:02 2012 +0100

    sna: Add some DBG for selecting glyph path
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index f8959e1..6f9faf4 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1433,6 +1433,7 @@ sna_glyphs(CARD8 op,
 	if (mask && dst->pCompositeClip->data == NULL &&
 	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
 	    mask == glyphs_format(nlist, list, glyphs)) {
+		DBG(("%s: discarding mask\n", __FUNCTION__));
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
 				  src_x, src_y,
@@ -1441,8 +1442,10 @@ sna_glyphs(CARD8 op,
 	}
 
 	/* Otherwise see if we can substitute a mask */
-	if (!mask)
+	if (!mask) {
 		mask = glyphs_format(nlist, list, glyphs);
+		DBG(("%s: substituting mask? %d\n", __FUNCTION__, mask!=NULL));
+	}
 	if (mask) {
 		if (glyphs_via_mask(sna, op,
 				    src, dst, mask,
commit b68b2d90915f7a97e43a39e3117927544886fba0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 23:24:32 2012 +0100

    sna: Add some DBG for short-circuiting choice of source bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 546148d..02727a3 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -510,6 +510,7 @@ sna_render_pixmap_bo(struct sna *sna,
 		if (priv->gpu_bo &&
 		    (DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage ||
 		     priv->gpu_bo->proxy)) {
+			DBG(("%s: GPU all damaged\n", __FUNCTION__));
 			channel->bo = kgem_bo_reference(priv->gpu_bo);
 			return 1;
 		}
@@ -517,6 +518,7 @@ sna_render_pixmap_bo(struct sna *sna,
 		if (priv->cpu_bo &&
 		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
 		    !priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
+			DBG(("%s: CPU all damaged\n", __FUNCTION__));
 			channel->bo = kgem_bo_reference(priv->cpu_bo);
 			return 1;
 		}
commit ad4786b285074b5bd70b3ad0bea6ec1b77ad6740
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 22:24:51 2012 +0100

    sna: Aim for consistency and use stdbool except for core X APIs
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
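
The convention applied across the gen*_render.c files: C99 stdbool (bool, true,
false) for everything internal to the driver, with the Xlib-style Bool, TRUE and
FALSE reserved for the places a core X server API dictates them. A small
illustration of the boundary (names and values hypothetical):

    #include <stdbool.h>

    /* In the driver, Bool/TRUE/FALSE come from the X server headers;
     * defined here only to keep the sketch self-contained. */
    typedef int Bool;
    #define TRUE  1
    #define FALSE 0

    static bool pitch_fits_3d(int pitch)       /* internal helper: stdbool */
    {
            return pitch > 0 && pitch <= 8192;
    }

    Bool ExampleCheckPitch(int pitch)          /* core X ABI boundary: Bool */
    {
            return pitch_fits_3d(pitch) ? TRUE : FALSE;
    }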

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 93880a8..622ba1f 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -57,8 +57,8 @@
 #define VERTEX(v) batch_emit_float(sna, v)
 
 static const struct blendinfo {
-	Bool dst_alpha;
-	Bool src_alpha;
+	bool dst_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen2_blend_op[] = {
@@ -148,7 +148,7 @@ gen2_get_dst_format(uint32_t format)
 #undef BIAS
 }
 
-static Bool
+static bool
 gen2_check_dst_format(uint32_t format)
 {
 	switch (format) {
@@ -160,9 +160,9 @@ gen2_check_dst_format(uint32_t format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -229,32 +229,32 @@ gen2_sampler_tiling_bits(uint32_t tiling)
 	return bits;
 }
 
-static Bool
+static bool
 gen2_check_filter(PicturePtr picture)
 {
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool
+static bool
 gen2_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -406,7 +406,7 @@ gen2_get_blend_factors(const struct sna_composite_op *op,
 }
 
 static uint32_t gen2_get_blend_cntl(int op,
-				    Bool has_component_alpha,
+				    bool has_component_alpha,
 				    uint32_t dst_format)
 {
 	uint32_t sblend, dblend;
@@ -503,7 +503,7 @@ static void gen2_emit_invariant(struct sna *sna)
 	      ENABLE_COLOR_WRITE |
 	      ENABLE_TEX_CACHE);
 
-	sna->render_state.gen2.need_invariant = FALSE;
+	sna->render_state.gen2.need_invariant = false;
 }
 
 static void
@@ -1104,14 +1104,14 @@ static void gen2_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen2_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_solid  = TRUE;
+	channel->is_solid  = true;
 	channel->width  = 1;
 	channel->height = 1;
 	channel->pict_format = PICT_a8r8g8b8;
@@ -1121,12 +1121,12 @@ gen2_composite_solid_init(struct sna *sna,
 
 	channel->scale[0]  = channel->scale[1]  = 1;
 	channel->offset[0] = channel->offset[1] = 0;
-	return TRUE;
+	return true;
 }
 
 #define xFixedToDouble(f) pixman_fixed_to_double(f)
 
-static Bool
+static bool
 gen2_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -1161,7 +1161,7 @@ gen2_composite_linear_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
-	channel->is_linear = TRUE;
+	channel->is_linear = true;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
 	channel->pict_format = PICT_a8r8g8b8;
@@ -1229,17 +1229,17 @@ gen2_composite_linear_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool source_is_covered(PicturePtr picture,
+static bool source_is_covered(PicturePtr picture,
 			      int x, int y,
 			      int width, int height)
 {
 	int x1, y1, x2, y2;
 
 	if (picture->repeat && picture->repeatType != RepeatNone)
-		return TRUE;
+		return true;
 
 	if (picture->pDrawable == NULL)
-		return FALSE;
+		return false;
 
 	if (picture->transform) {
 		pixman_box16_t sample;
@@ -1268,7 +1268,7 @@ static Bool source_is_covered(PicturePtr picture,
 		y2 <= picture->pDrawable->height;
 }
 
-static Bool
+static bool
 gen2_check_card_format(struct sna *sna,
 		       PicturePtr picture,
 		       struct sna_composite_channel *channel,
@@ -1279,24 +1279,24 @@ gen2_check_card_format(struct sna *sna,
 
 	for (i = 0; i < ARRAY_SIZE(i8xx_tex_formats); i++) {
 		if (i8xx_tex_formats[i].fmt == format)
-			return TRUE;
+			return true;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(i85x_tex_formats); i++) {
 		if (i85x_tex_formats[i].fmt == format) {
 			if (sna->kgem.gen >= 21)
-				return TRUE;
+				return true;
 
 			if (source_is_covered(picture, x, y, w,h)) {
 				channel->is_opaque = true;
-				return TRUE;
+				return true;
 			}
 
-			return FALSE;
+			return false;
 		}
 	}
 
-	return FALSE;
+	return false;
 }
 
 static int
@@ -1315,10 +1315,10 @@ gen2_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
-	channel->is_linear = FALSE;
-	channel->is_opaque = FALSE;
-	channel->is_affine = TRUE;
+	channel->is_solid = false;
+	channel->is_linear = false;
+	channel->is_opaque = false;
+	channel->is_affine = true;
 	channel->transform = NULL;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -1398,7 +1398,7 @@ gen2_composite_picture(struct sna *sna,
 				    x, y, w, h, dst_x, dst_y);
 }
 
-static Bool
+static bool
 gen2_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
 			  PicturePtr dst)
@@ -1412,19 +1412,19 @@ gen2_composite_set_target(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_WRITE | MOVE_READ);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	if (priv->gpu_bo->pitch < 8) {
 		struct kgem_bo *bo;
 
 		if (priv->pinned)
-			return FALSE;
+			return false;
 
 		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
 				     op->dst.width, op->dst.height, 8,
 				     op->dst.pixmap->drawable.bitsPerPixel);
 		if (bo == NULL)
-			return FALSE;
+			return false;
 
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = bo;
@@ -1437,10 +1437,10 @@ gen2_composite_set_target(struct sna *sna,
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst,
 	PicturePtr src,
@@ -1450,34 +1450,34 @@ try_blt(struct sna *sna,
 
 	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
 		DBG(("%s: target too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	/* If it is a solid, try to use the BLT paths */
 	if (sna_picture_is_solid(src, &color))
-		return TRUE;
+		return true;
 
 	if (!src->pDrawable)
-		return FALSE;
+		return false;
 
 	if (too_large(src->pDrawable->width, src->pDrawable->height)) {
 		DBG(("%s: source too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     src->pDrawable->width, src->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 	return is_cpu(src->pDrawable);
 }
@@ -1486,14 +1486,14 @@ static bool
 is_unhandled_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -1564,7 +1564,7 @@ gen2_composite_fallback(struct sna *sna,
 	if (!gen2_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -1586,11 +1586,11 @@ gen2_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -1598,18 +1598,18 @@ gen2_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -1619,25 +1619,25 @@ gen2_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -1648,40 +1648,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen2_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen2_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen2_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen2_check_format(sna, mask))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -1691,10 +1691,10 @@ reuse_source(struct sna *sna,
 	mc->filter = mask->filter;
 	mc->pict_format = mask->format;
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen2_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -1711,12 +1711,12 @@ gen2_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen2_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -1736,10 +1736,10 @@ gen2_render_composite(struct sna *sna,
 			      dst_x, dst_y,
 			      width, height,
 			      tmp))
-		return TRUE;
+		return true;
 
 	if (gen2_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -1752,7 +1752,7 @@ gen2_render_composite(struct sna *sna,
 	if (!gen2_composite_set_target(sna, tmp, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
@@ -1761,7 +1761,7 @@ gen2_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
@@ -1770,7 +1770,7 @@ gen2_render_composite(struct sna *sna,
 	    tmp->dst.bo->pitch > MAX_3D_PITCH) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen2_composite_picture(sna, src, &tmp->src,
@@ -1809,13 +1809,13 @@ gen2_render_composite(struct sna *sna,
 			 * and on the source value.  We can only get one of those
 			 * into the single source value that we get to blend with.
 			 */
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 			if (gen2_blend_op[op].src_alpha &&
 			    (gen2_blend_op[op].src_blend != BLENDFACTOR_ZERO)) {
 				if (op != PictOpOver)
-					return FALSE;
+					return false;
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -1865,7 +1865,7 @@ gen2_render_composite(struct sna *sna,
 	}
 
 	gen2_emit_composite_state(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -1876,7 +1876,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 fastcall static void
@@ -2094,7 +2094,7 @@ static void gen2_emit_composite_spans_state(struct sna *sna,
 	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
 	BATCH(!op->base.src.is_solid << 12);
 	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY | S3_DIFFUSE_PRESENT);
-	BATCH(gen2_get_blend_cntl(op->base.op, FALSE, op->base.dst.format));
+	BATCH(gen2_get_blend_cntl(op->base.op, false, op->base.dst.format));
 	if (memcmp(sna->kgem.batch + sna->render_state.gen2.ls1 + 1,
 		   sna->kgem.batch + unwind + 1,
 		   3 * sizeof(uint32_t)) == 0)
@@ -2191,7 +2191,7 @@ gen2_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen2_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -2206,17 +2206,17 @@ gen2_render_composite_spans(struct sna *sna,
 	     src_x, src_y, dst_x, dst_y, width, height));
 
 #if NO_COMPOSITE_SPANS
-	return FALSE;
+	return false;
 #endif
 
 	if (op >= ARRAY_SIZE(gen2_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (gen2_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -2225,7 +2225,7 @@ gen2_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -2236,7 +2236,7 @@ gen2_render_composite_spans(struct sna *sna,
 	if (!gen2_composite_set_target(sna, &tmp->base, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
@@ -2245,7 +2245,7 @@ gen2_render_composite_spans(struct sna *sna,
 	    tmp->base.dst.bo->pitch > MAX_3D_PITCH) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen2_composite_picture(sna, src, &tmp->base.src,
@@ -2294,7 +2294,7 @@ gen2_render_composite_spans(struct sna *sna,
 	}
 
 	gen2_emit_composite_spans_state(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -2302,7 +2302,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2347,7 +2347,7 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
 	BATCH(0);
 	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
-	BATCH(gen2_get_blend_cntl(op->op, FALSE, op->dst.format));
+	BATCH(gen2_get_blend_cntl(op->op, false, op->dst.format));
 	if (memcmp(sna->kgem.batch + sna->render_state.gen2.ls1 + 1,
 		   sna->kgem.batch + ls1 + 1,
 		   3 * sizeof(uint32_t)) == 0)
@@ -2364,7 +2364,7 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 	}
 }
 
-static Bool
+static bool
 gen2_render_fill_boxes_try_blt(struct sna *sna,
 			       CARD8 op, PictFormat format,
 			       const xRenderColor *color,
@@ -2375,7 +2375,7 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 	uint32_t pixel;
 
 	if (op > PictOpSrc)
-		return FALSE;
+		return false;
 
 	if (op == PictOpClear) {
 		alu = GXclear;
@@ -2386,7 +2386,7 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 					    color->blue,
 					    color->alpha,
 					    format))
-		return FALSE;
+		return false;
 	else
 		alu = GXcopy;
 
@@ -2395,7 +2395,7 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 				  pixel, box, n);
 }
 
-static inline Bool prefer_blt_fill(struct sna *sna)
+static inline bool prefer_blt_fill(struct sna *sna)
 {
 #if PREFER_BLT_FILL
 	return true;
@@ -2404,7 +2404,7 @@ static inline Bool prefer_blt_fill(struct sna *sna)
 #endif
 }
 
-static inline Bool prefer_blt_copy(struct sna *sna, unsigned flags)
+static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
 {
 #if PREFER_BLT_COPY
 	return true;
@@ -2413,7 +2413,7 @@ static inline Bool prefer_blt_copy(struct sna *sna, unsigned flags)
 #endif
 }
 
-static Bool
+static bool
 gen2_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -2427,7 +2427,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen2_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_FILL_BOXES
@@ -2448,10 +2448,10 @@ gen2_render_fill_boxes(struct sna *sna,
 		if (gen2_render_fill_boxes_try_blt(sna, op, format, color,
 						   dst, dst_bo,
 						   box, n))
-			return TRUE;
+			return true;
 
 		if (!gen2_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		assert(dst_bo->pitch >= 8);
 		return sna_tiling_fill_boxes(sna, op, format, color,
@@ -2462,7 +2462,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	    gen2_render_fill_boxes_try_blt(sna, op, format, color,
 					   dst, dst_bo,
 					   box, n))
-		return TRUE;
+		return true;
 
 	if (op == PictOpClear)
 		pixel = 0;
@@ -2472,7 +2472,7 @@ gen2_render_fill_boxes(struct sna *sna,
 					  color->blue,
 					  color->alpha,
 					  PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s: using shader for op=%d, format=%x, pixel=%x\n",
 	     __FUNCTION__, op, (int)format, pixel));
@@ -2516,7 +2516,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	} while (n);
 
 	gen2_vertex_flush(sna, &tmp);
-	return TRUE;
+	return true;
 }
 
 static void gen2_emit_fill_state(struct sna *sna,
@@ -2621,7 +2621,7 @@ gen2_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	gen2_vertex_flush(sna, &op->base);
 }
 
-static Bool
+static bool
 gen2_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -2640,7 +2640,7 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
@@ -2677,10 +2677,10 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 	tmp->done  = gen2_render_fill_op_done;
 
 	gen2_emit_fill_state(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen2_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -2698,7 +2698,7 @@ gen2_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -2716,7 +2716,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna) &&
 	    gen2_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
@@ -2728,7 +2728,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		kgem_submit(&sna->kgem);
 		if (gen2_render_fill_one_try_blt(sna, dst, bo, color,
 						 x1, y1, x2, y2, alu))
-			return TRUE;
+			return true;
 		assert(kgem_check_bo(&sna->kgem, bo, NULL));
 	}
 
@@ -2757,7 +2757,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	VERTEX(y1);
 	gen2_vertex_flush(sna, &tmp);
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -2855,7 +2855,7 @@ static void gen2_emit_copy_state(struct sna *sna, const struct sna_composite_op
 	gen2_emit_texture(sna, &op->src, 0);
 }
 
-static Bool
+static bool
 gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2865,7 +2865,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -2884,7 +2884,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (src_bo == dst_bo || /* XXX handle overlap using 3D ? */
 	    too_large(src->drawable.width, src->drawable.height) ||
@@ -2983,7 +2983,7 @@ fallback:
 
 	gen2_vertex_flush(sna, &tmp);
 	sna_render_composite_redirect_done(sna, &tmp);
-	return TRUE;
+	return true;
 
 fallback_tiled:
 	return sna_tiling_copy_boxes(sna, alu,
@@ -3026,7 +3026,7 @@ gen2_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 	gen2_vertex_flush(sna, &op->base);
 }
 
-static Bool
+static bool
 gen2_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3034,7 +3034,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -3049,7 +3049,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(src->drawable.width, src->drawable.height) ||
@@ -3058,7 +3058,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3089,13 +3089,13 @@ fallback:
 	tmp->done = gen2_render_copy_done;
 
 	gen2_emit_composite_state(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
 static void
 gen2_render_reset(struct sna *sna)
 {
-	sna->render_state.gen2.need_invariant = TRUE;
+	sna->render_state.gen2.need_invariant = true;
 	sna->render_state.gen2.logic_op_enabled = 0;
 	sna->render_state.gen2.vertex_offset = 0;
 	sna->render_state.gen2.target = 0;
@@ -3124,7 +3124,7 @@ gen2_render_context_switch(struct kgem *kgem,
 	sna->blt_state.fill_bo = 0;
 }
 
-Bool gen2_render_init(struct sna *sna)
+bool gen2_render_init(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
 
@@ -3148,5 +3148,5 @@ Bool gen2_render_init(struct sna *sna)
 
 	render->max_3d_size = MAX_3D_SIZE;
 	render->max_3d_pitch = MAX_3D_PITCH;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 20286fc..e02eb89 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -72,8 +72,8 @@ enum gen3_radial_mode {
 };
 
 static const struct blendinfo {
-	Bool dst_alpha;
-	Bool src_alpha;
+	bool dst_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen3_blend_op[] = {
@@ -101,21 +101,21 @@ static const struct blendinfo {
 static const struct formatinfo {
 	unsigned int fmt, xfmt;
 	uint32_t card_fmt;
-	Bool rb_reversed;
+	bool rb_reversed;
 } gen3_tex_formats[] = {
-	{PICT_a8, 0, MAPSURF_8BIT | MT_8BIT_A8, FALSE},
-	{PICT_a8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_ARGB8888, FALSE},
-	{PICT_x8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_XRGB8888, FALSE},
-	{PICT_a8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_ABGR8888, FALSE},
-	{PICT_x8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_XBGR8888, FALSE},
-	{PICT_a2r10g10b10, PICT_x2r10g10b10, MAPSURF_32BIT | MT_32BIT_ARGB2101010, FALSE},
-	{PICT_a2b10g10r10, PICT_x2b10g10r10, MAPSURF_32BIT | MT_32BIT_ABGR2101010, FALSE},
-	{PICT_r5g6b5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, FALSE},
-	{PICT_b5g6r5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, TRUE},
-	{PICT_a1r5g5b5, PICT_x1r5g5b5, MAPSURF_16BIT | MT_16BIT_ARGB1555, FALSE},
-	{PICT_a1b5g5r5, PICT_x1b5g5r5, MAPSURF_16BIT | MT_16BIT_ARGB1555, TRUE},
-	{PICT_a4r4g4b4, PICT_x4r4g4b4, MAPSURF_16BIT | MT_16BIT_ARGB4444, FALSE},
-	{PICT_a4b4g4r4, PICT_x4b4g4r4, MAPSURF_16BIT | MT_16BIT_ARGB4444, TRUE},
+	{PICT_a8, 0, MAPSURF_8BIT | MT_8BIT_A8, false},
+	{PICT_a8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_ARGB8888, false},
+	{PICT_x8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_XRGB8888, false},
+	{PICT_a8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_ABGR8888, false},
+	{PICT_x8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_XBGR8888, false},
+	{PICT_a2r10g10b10, PICT_x2r10g10b10, MAPSURF_32BIT | MT_32BIT_ARGB2101010, false},
+	{PICT_a2b10g10r10, PICT_x2b10g10r10, MAPSURF_32BIT | MT_32BIT_ABGR2101010, false},
+	{PICT_r5g6b5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, false},
+	{PICT_b5g6r5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, true},
+	{PICT_a1r5g5b5, PICT_x1r5g5b5, MAPSURF_16BIT | MT_16BIT_ARGB1555, false},
+	{PICT_a1b5g5r5, PICT_x1b5g5r5, MAPSURF_16BIT | MT_16BIT_ARGB1555, true},
+	{PICT_a4r4g4b4, PICT_x4r4g4b4, MAPSURF_16BIT | MT_16BIT_ARGB4444, false},
+	{PICT_a4b4g4r4, PICT_x4b4g4r4, MAPSURF_16BIT | MT_16BIT_ARGB4444, true},
 };
 
 #define xFixedToDouble(f) pixman_fixed_to_double(f)
@@ -136,14 +136,14 @@ static inline uint32_t gen3_buf_tiling(uint32_t tiling)
 	return v;
 }
 
-static inline Bool
+static inline bool
 gen3_check_pitch_3d(struct kgem_bo *bo)
 {
 	return bo->pitch <= MAX_3D_PITCH;
 }
 
 static uint32_t gen3_get_blend_cntl(int op,
-				    Bool has_component_alpha,
+				    bool has_component_alpha,
 				    uint32_t dst_format)
 {
 	uint32_t sblend = gen3_blend_op[op].src_blend;
@@ -193,7 +193,7 @@ static uint32_t gen3_get_blend_cntl(int op,
 		dblend << S6_CBUF_DST_BLEND_FACT_SHIFT);
 }
 
-static Bool gen3_check_dst_format(uint32_t format)
+static bool gen3_check_dst_format(uint32_t format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -215,13 +215,13 @@ static Bool gen3_check_dst_format(uint32_t format)
 	case PICT_x4r4g4b4:
 	case PICT_a4b4g4r4:
 	case PICT_x4b4g4r4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool gen3_dst_rb_reversed(uint32_t format)
+static bool gen3_dst_rb_reversed(uint32_t format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -234,9 +234,9 @@ static Bool gen3_dst_rb_reversed(uint32_t format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -277,7 +277,7 @@ static uint32_t gen3_get_dst_format(uint32_t format)
 #undef BIAS
 }
 
-static Bool gen3_check_format(PicturePtr p)
+static bool gen3_check_format(PicturePtr p)
 {
 	switch (p->format) {
 	case PICT_a8:
@@ -293,13 +293,13 @@ static Bool gen3_check_format(PicturePtr p)
 	case PICT_a1b5g5r5:
 	case PICT_a4r4g4b4:
 	case PICT_a4b4g4r4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool gen3_check_xformat(PicturePtr p)
+static bool gen3_check_xformat(PicturePtr p)
 {
 	switch (p->format) {
 	case PICT_a8r8g8b8:
@@ -321,9 +321,9 @@ static Bool gen3_check_xformat(PicturePtr p)
 	case PICT_x4r4g4b4:
 	case PICT_a4b4g4r4:
 	case PICT_x4b4g4r4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -367,19 +367,19 @@ static uint32_t gen3_gradient_repeat(uint32_t repeat)
 #undef REPEAT
 }
 
-static Bool gen3_check_repeat(PicturePtr p)
+static bool gen3_check_repeat(PicturePtr p)
 {
 	if (!p->repeat)
-		return TRUE;
+		return true;
 
 	switch (p->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -404,9 +404,9 @@ static bool gen3_check_filter(PicturePtr p)
 	switch (p->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -917,7 +917,7 @@ gen3_composite_emit_shader(struct sna *sna,
 			   const struct sna_composite_op *op,
 			   uint8_t blend)
 {
-	Bool dst_is_alpha = PIXMAN_FORMAT_RGB(op->dst.format) == 0;
+	bool dst_is_alpha = PIXMAN_FORMAT_RGB(op->dst.format) == 0;
 	const struct sna_composite_channel *src, *mask;
 	struct gen3_render_state *state = &sna->render_state.gen3;
 	uint32_t shader_offset, id;
@@ -1292,7 +1292,7 @@ static void gen3_emit_invariant(struct sna *sna)
 	OUT_BATCH(_3DSTATE_STIPPLE);
 	OUT_BATCH(0x00000000);
 
-	sna->render_state.gen3.need_invariant = FALSE;
+	sna->render_state.gen3.need_invariant = false;
 }
 
 #define MAX_OBJECTS 3 /* worst case: dst + src + mask  */
@@ -1585,7 +1585,7 @@ static void gen3_magic_ca_pass(struct sna *sna,
 	     sna->render.vertex_index - sna->render.vertex_start));
 
 	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(6) | 0);
-	OUT_BATCH(gen3_get_blend_cntl(PictOpAdd, TRUE, op->dst.format));
+	OUT_BATCH(gen3_get_blend_cntl(PictOpAdd, true, op->dst.format));
 	gen3_composite_emit_shader(sna, op, PictOpAdd);
 
 	OUT_BATCH(PRIM3D_RECTLIST | PRIM3D_INDIRECT_SEQUENTIAL |
@@ -1950,7 +1950,7 @@ gen3_render_reset(struct sna *sna)
 {
 	struct gen3_render_state *state = &sna->render_state.gen3;
 
-	state->need_invariant = TRUE;
+	state->need_invariant = true;
 	state->current_dst = 0;
 	state->tex_count = 0;
 	state->last_drawrect_limit = ~0U;
@@ -1999,7 +1999,7 @@ gen3_render_expire(struct kgem *kgem)
 	}
 }
 
-static Bool gen3_composite_channel_set_format(struct sna_composite_channel *channel,
+static bool gen3_composite_channel_set_format(struct sna_composite_channel *channel,
 					      CARD32 format)
 {
 	unsigned int i;
@@ -2008,23 +2008,23 @@ static Bool gen3_composite_channel_set_format(struct sna_composite_channel *chan
 		if (gen3_tex_formats[i].fmt == format) {
 			channel->card_format = gen3_tex_formats[i].card_fmt;
 			channel->rb_reversed = gen3_tex_formats[i].rb_reversed;
-			return TRUE;
+			return true;
 		}
 	}
-	return FALSE;
+	return false;
 }
 
-static Bool source_is_covered(PicturePtr picture,
+static bool source_is_covered(PicturePtr picture,
 			      int x, int y,
 			      int width, int height)
 {
 	int x1, y1, x2, y2;
 
 	if (picture->repeat && picture->repeatType != RepeatNone)
-		return TRUE;
+		return true;
 
 	if (picture->pDrawable == NULL)
-		return FALSE;
+		return false;
 
 	if (picture->transform) {
 		pixman_box16_t sample;
@@ -2053,7 +2053,7 @@ static Bool source_is_covered(PicturePtr picture,
 		y2 <= picture->pDrawable->height;
 }
 
-static Bool gen3_composite_channel_set_xformat(PicturePtr picture,
+static bool gen3_composite_channel_set_xformat(PicturePtr picture,
 					       struct sna_composite_channel *channel,
 					       int x, int y,
 					       int width, int height)
@@ -2061,24 +2061,24 @@ static Bool gen3_composite_channel_set_xformat(PicturePtr picture,
 	unsigned int i;
 
 	if (PICT_FORMAT_A(picture->format) != 0)
-		return FALSE;
+		return false;
 
 	if (width == 0 || height == 0)
-		return FALSE;
+		return false;
 
 	if (!source_is_covered(picture, x, y, width, height))
-		return FALSE;
+		return false;
 
 	for (i = 0; i < ARRAY_SIZE(gen3_tex_formats); i++) {
 		if (gen3_tex_formats[i].xfmt == picture->format) {
 			channel->card_format = gen3_tex_formats[i].card_fmt;
 			channel->rb_reversed = gen3_tex_formats[i].rb_reversed;
 			channel->alpha_fixup = true;
-			return TRUE;
+			return true;
 		}
 	}
 
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2123,7 +2123,7 @@ static void gen3_composite_channel_convert(struct sna_composite_channel *channel
 		gen3_composite_channel_set_format(channel, channel->pict_format);
 }
 
-static Bool gen3_gradient_setup(struct sna *sna,
+static bool gen3_gradient_setup(struct sna *sna,
 				PicturePtr picture,
 				struct sna_composite_channel *channel,
 				int16_t ox, int16_t oy)
@@ -2140,14 +2140,14 @@ static Bool gen3_gradient_setup(struct sna *sna,
 		channel->repeat = picture->repeatType;
 		break;
 	default:
-		return FALSE;
+		return false;
 	}
 
 	channel->bo =
 		sna_render_get_gradient(sna,
 					(PictGradient *)picture->pSourcePict);
 	if (channel->bo == NULL)
-		return FALSE;
+		return false;
 
 	channel->pict_format = PICT_a8r8g8b8;
 	channel->card_format = MAPSURF_32BIT | MT_32BIT_ARGB8888;
@@ -2166,7 +2166,7 @@ static Bool gen3_gradient_setup(struct sna *sna,
 	channel->offset[0] = ox;
 	channel->offset[1] = oy;
 	channel->scale[0] = channel->scale[1] = 1;
-	return TRUE;
+	return true;
 }
 
 static int
@@ -2271,7 +2271,7 @@ gen3_init_radial(struct sna *sna,
 	return 1;
 }
 
-static Bool
+static bool
 sna_picture_is_clear(PicturePtr picture,
 		     int x, int y, int w, int h,
 		     uint32_t *color)
@@ -2279,20 +2279,20 @@ sna_picture_is_clear(PicturePtr picture,
 	struct sna_pixmap *priv;
 
 	if (!picture->pDrawable)
-		return FALSE;
+		return false;
 
 	priv = sna_pixmap(get_drawable_pixmap(picture->pDrawable));
 	if (priv == NULL || !priv->clear)
-		return FALSE;
+		return false;
 
 	if (!source_is_covered(picture, x, y, w, h))
-		return FALSE;
+		return false;
 
 	*color = priv->clear_color;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static int
 gen3_composite_picture(struct sna *sna,
 		       PicturePtr picture,
 		       struct sna_composite_op *op,
@@ -2426,7 +2426,7 @@ source_use_blt(struct sna *sna, PicturePtr picture)
 	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst,
 	PicturePtr src,
@@ -2434,20 +2434,20 @@ try_blt(struct sna *sna,
 {
 	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
 		DBG(("%s: target too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
@@ -2474,7 +2474,7 @@ gen3_align_vertex(struct sna *sna,
 	}
 }
 
-static Bool
+static bool
 gen3_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
 			  PicturePtr dst)
@@ -2497,7 +2497,7 @@ gen3_composite_set_target(struct sna *sna,
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		/* For single-stream mode there should be no minimum alignment
 		 * required, except that the width must be at least 2 elements.
@@ -2506,14 +2506,14 @@ gen3_composite_set_target(struct sna *sna,
 			struct kgem_bo *bo;
 
 			if (priv->pinned)
-				return FALSE;
+				return false;
 
 			bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
 					     op->dst.width, op->dst.height,
 					     2*op->dst.pixmap->drawable.bitsPerPixel,
 					     op->dst.pixmap->drawable.bitsPerPixel);
 			if (bo == NULL)
-				return FALSE;
+				return false;
 
 			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 			priv->gpu_bo = bo;
@@ -2537,7 +2537,7 @@ gen3_composite_set_target(struct sna *sna,
 	     op->dst.x, op->dst.y,
 	     op->damage ? *op->damage : (void *)-1));
 
-	return TRUE;
+	return true;
 }
 
 static inline uint8_t
@@ -2634,7 +2634,7 @@ gen3_composite_fallback(struct sna *sna,
 	if (!gen3_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2656,11 +2656,11 @@ gen3_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask &&
@@ -2670,7 +2670,7 @@ gen3_composite_fallback(struct sna *sna,
 	{
 		DBG(("%s: component-alpha mask with op=%d, should fallback\n",
 		     __FUNCTION__, op));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2678,18 +2678,18 @@ gen3_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2699,25 +2699,25 @@ gen3_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2726,37 +2726,37 @@ reuse_source(struct sna *sna,
 	     PicturePtr mask, struct sna_composite_channel *mc, int msk_x, int msk_y)
 {
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (mask == src) {
 		*mc = *sc;
 		if (mc->bo)
 			kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if ((src->pDrawable == NULL || mask->pDrawable != src->pDrawable))
-		return FALSE;
+		return false;
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen3_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen3_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen3_check_format(mask))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2768,10 +2768,10 @@ reuse_source(struct sna *sna,
 	gen3_composite_channel_set_format(mc, mask->format);
 	if (mc->bo)
 		kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen3_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2788,12 +2788,12 @@ gen3_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen3_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2813,10 +2813,10 @@ gen3_render_composite(struct sna *sna,
 			      dst_x, dst_y,
 			      width, height,
 			      tmp))
-		return TRUE;
+		return true;
 
 	if (gen3_composite_fallback(sna, op, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2829,7 +2829,7 @@ gen3_render_composite(struct sna *sna,
 	if (!gen3_composite_set_target(sna, tmp, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL && sna->kgem.mode != KGEM_RENDER &&
@@ -2838,7 +2838,7 @@ gen3_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
@@ -2848,12 +2848,12 @@ gen3_render_composite(struct sna *sna,
 	    !gen3_check_pitch_3d(tmp->dst.bo)) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	tmp->u.gen3.num_constants = 0;
 	tmp->src.u.gen3.type = SHADER_TEXTURE;
-	tmp->src.is_affine = TRUE;
+	tmp->src.is_affine = true;
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen3_composite_picture(sna, src, tmp, &tmp->src,
 				       src_x, src_y,
@@ -2872,9 +2872,9 @@ gen3_render_composite(struct sna *sna,
 	DBG(("%s: source type=%d\n", __FUNCTION__, tmp->src.u.gen3.type));
 
 	tmp->mask.u.gen3.type = SHADER_NONE;
-	tmp->mask.is_affine = TRUE;
-	tmp->need_magic_ca_pass = FALSE;
-	tmp->has_component_alpha = FALSE;
+	tmp->mask.is_affine = true;
+	tmp->need_magic_ca_pass = false;
+	tmp->has_component_alpha = false;
 	if (mask && tmp->src.u.gen3.type != SHADER_ZERO) {
 		if (!reuse_source(sna,
 				  src, &tmp->src, src_x, src_y,
@@ -2915,16 +2915,16 @@ gen3_render_composite(struct sna *sna,
 				 */
 				DBG(("%s: component-alpha mask: %d\n",
 				     __FUNCTION__, tmp->mask.u.gen3.type));
-				tmp->has_component_alpha = TRUE;
+				tmp->has_component_alpha = true;
 				if (tmp->mask.u.gen3.type == SHADER_WHITE) {
 					tmp->mask.u.gen3.type = SHADER_NONE;
-					tmp->has_component_alpha = FALSE;
+					tmp->has_component_alpha = false;
 				} else if (gen3_blend_op[op].src_alpha &&
 					   (gen3_blend_op[op].src_blend != BLENDFACT_ZERO)) {
 					if (op != PictOpOver)
 						goto cleanup_mask;
 
-					tmp->need_magic_ca_pass = TRUE;
+					tmp->need_magic_ca_pass = true;
 					tmp->op = PictOpOutReverse;
 					sna->render.vertex_start = sna->render.vertex_index;
 				}
@@ -3037,7 +3037,7 @@ gen3_render_composite(struct sna *sna,
 
 	gen3_emit_composite_state(sna, tmp);
 	gen3_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -3048,7 +3048,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -3367,7 +3367,7 @@ gen3_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen3_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3384,17 +3384,17 @@ gen3_render_composite_spans(struct sna *sna,
 	     src_x, src_y, dst_x, dst_y, width, height));
 
 #if NO_COMPOSITE_SPANS
-	return FALSE;
+	return false;
 #endif
 
 	if (op >= ARRAY_SIZE(gen3_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (gen3_composite_fallback(sna, op, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -3403,7 +3403,7 @@ gen3_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -3414,7 +3414,7 @@ gen3_render_composite_spans(struct sna *sna,
 	if (!gen3_composite_set_target(sna, &tmp->base, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
@@ -3424,11 +3424,11 @@ gen3_render_composite_spans(struct sna *sna,
 	    !gen3_check_pitch_3d(tmp->base.dst.bo)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	tmp->base.src.u.gen3.type = SHADER_TEXTURE;
-	tmp->base.src.is_affine = TRUE;
+	tmp->base.src.is_affine = true;
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen3_composite_picture(sna, src, &tmp->base, &tmp->base.src,
 				       src_x, src_y,
@@ -3505,7 +3505,7 @@ gen3_render_composite_spans(struct sna *sna,
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -3513,7 +3513,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -3862,7 +3862,7 @@ gen3_get_inline_rectangles(struct sna *sna, int want, int floats_per_vertex)
 	return want;
 }
 
-static Bool
+static bool
 gen3_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -3887,7 +3887,7 @@ gen3_render_video(struct sna *sna,
 
 	dst_bo = priv->gpu_bo;
 	if (dst_bo == NULL)
-		return FALSE;
+		return false;
 
 	if (too_large(pixmap->drawable.width, pixmap->drawable.height) ||
 	    !gen3_check_pitch_3d(dst_bo)) {
@@ -3900,7 +3900,7 @@ gen3_render_video(struct sna *sna,
 							   width, height, bpp),
 					0);
 		if (!dst_bo)
-			return FALSE;
+			return false;
 
 		pix_xoff = -dxo;
 		pix_yoff = -dyo;
@@ -4008,7 +4008,7 @@ gen3_render_video(struct sna *sna,
 	}
 	priv->clear = false;
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -4031,7 +4031,7 @@ gen3_render_copy_setup_source(struct sna_composite_channel *channel,
 	channel->is_affine = 1;
 }
 
-static Bool
+static bool
 gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -4041,7 +4041,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -4059,7 +4059,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    src_bo == dst_bo || /* XXX handle overlap using 3D ? */
@@ -4068,7 +4068,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 fallback_blt:
 		if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
 		    !kgem_bo_can_blt(&sna->kgem, dst_bo))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -4164,7 +4164,7 @@ fallback_blt:
 
 	gen3_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
-	return TRUE;
+	return true;
 
 fallback_tiled:
 	return sna_tiling_copy_boxes(sna, alu,
@@ -4205,7 +4205,7 @@ gen3_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen3_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen3_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -4213,7 +4213,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -4228,7 +4228,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4237,7 +4237,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 	    src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch > MAX_3D_PITCH) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -4270,10 +4270,10 @@ fallback:
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen3_render_fill_boxes_try_blt(struct sna *sna,
 			       CARD8 op, PictFormat format,
 			       const xRenderColor *color,
@@ -4286,11 +4286,11 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 	if (dst_bo->tiling == I915_TILING_Y) {
 		DBG(("%s: y-tiling, can't blit\n", __FUNCTION__));
 		assert(!too_large(dst->drawable.width, dst->drawable.height));
-		return FALSE;
+		return false;
 	}
 
 	if (op > PictOpSrc)
-		return FALSE;
+		return false;
 
 	if (op == PictOpClear) {
 		alu = GXclear;
@@ -4301,7 +4301,7 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 					    color->blue,
 					    color->alpha,
 					    format))
-		return FALSE;
+		return false;
 	else
 		alu = GXcopy;
 
@@ -4310,7 +4310,7 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 				  pixel, box, n);
 }
 
-static inline Bool prefer_fill_blt(struct sna *sna)
+static inline bool prefer_fill_blt(struct sna *sna)
 {
 #if PREFER_BLT_FILL
 	return true;
@@ -4319,7 +4319,7 @@ static inline Bool prefer_fill_blt(struct sna *sna)
 #endif
 }
 
-static Bool
+static bool
 gen3_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -4333,7 +4333,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen3_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_FILL_BOXES
@@ -4354,10 +4354,10 @@ gen3_render_fill_boxes(struct sna *sna,
 		if (gen3_render_fill_boxes_try_blt(sna, op, format, color,
 						   dst, dst_bo,
 						   box, n))
-			return TRUE;
+			return true;
 
 		if (!gen3_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		return sna_tiling_fill_boxes(sna, op, format, color,
 					     dst, dst_bo, box, n);
@@ -4367,7 +4367,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	    gen3_render_fill_boxes_try_blt(sna, op, format, color,
 					   dst, dst_bo,
 					   box, n))
-		return TRUE;
+		return true;
 
 	if (op == PictOpClear) {
 		pixel = 0;
@@ -4379,7 +4379,7 @@ gen3_render_fill_boxes(struct sna *sna,
 					     color->alpha,
 					     PICT_a8r8g8b8)) {
 			assert(0);
-			return FALSE;
+			return false;
 		}
 	}
 	DBG(("%s: using shader for op=%d, format=%x, pixel=%x\n",
@@ -4430,7 +4430,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	} while (n);
 
 	gen3_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -4497,7 +4497,7 @@ gen3_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 		gen3_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen3_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -4516,7 +4516,7 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4560,10 +4560,10 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen3_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -4581,7 +4581,7 @@ gen3_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -4599,7 +4599,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_fill_blt(sna) &&
 	    gen3_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4633,7 +4633,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		kgem_submit(&sna->kgem);
 		if (gen3_render_fill_one_try_blt(sna, dst, bo, color,
 						 x1, y1, x2, y2, alu))
-			return TRUE;
+			return true;
 	}
 
 	gen3_emit_composite_state(sna, &tmp);
@@ -4648,7 +4648,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	OUT_VERTEX(y1);
 	gen3_vertex_flush(sna);
 
-	return TRUE;
+	return true;
 }
 
 static void gen3_render_flush(struct sna *sna)
@@ -4661,7 +4661,7 @@ gen3_render_fini(struct sna *sna)
 {
 }
 
-Bool gen3_render_init(struct sna *sna)
+bool gen3_render_init(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
 
@@ -4686,5 +4686,5 @@ Bool gen3_render_init(struct sna *sna)
 
 	sna->kgem.retire = gen3_render_retire;
 	sna->kgem.expire = gen3_render_expire;
-	return TRUE;
+	return true;
 }
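
[Editor's note: the hunks above (and in the gen4/gen5 files that follow) mechanically replace the Xlib-style Bool/TRUE/FALSE with C99 bool/true/false from <stdbool.h>; return types of the helpers change accordingly. A minimal sketch of the pattern, assuming a hypothetical helper is_even() purely for illustration — not code from this series:

	#include <stdbool.h>

	/* Before: Bool (an int typedef) with TRUE/FALSE macros.
	 * After:  C99 bool with true/false, as converted throughout this mail.
	 */
	static bool is_even(int v)
	{
		return (v & 1) == 0;	/* was: return (v & 1) == 0 ? TRUE : FALSE; */
	}
]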
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 44504c5..2edfbd0 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -199,29 +199,29 @@ static const uint32_t ps_kernel_planar_static[][4] = {
 static const struct wm_kernel_info {
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, FALSE),
-	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, false),
+	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA,
-	       ps_kernel_maskca_srcalpha_affine, TRUE),
+	       ps_kernel_maskca_srcalpha_affine, true),
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-	       ps_kernel_maskca_srcalpha_projective, TRUE),
+	       ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, FALSE),
-	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, FALSE),
+	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
+	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen4_blend_op[] = {
@@ -291,7 +291,7 @@ static inline bool too_large(int width, int height)
 }
 
 static int
-gen4_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen4_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -326,7 +326,7 @@ static void gen4_magic_ca_pass(struct sna *sna,
 
 	gen4_emit_pipelined_pointers(sna, op, PictOpAdd,
 				     gen4_choose_composite_kernel(PictOpAdd,
-								  TRUE, TRUE, op->is_affine));
+								  true, true, op->is_affine));
 
 	OUT_BATCH(GEN4_3DPRIMITIVE |
 		  GEN4_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -494,7 +494,7 @@ static void gen4_vertex_close(struct sna *sna)
 
 
 static uint32_t gen4_get_blend(int op,
-			       Bool has_component_alpha,
+			       bool has_component_alpha,
 			       uint32_t dst_format)
 {
 	uint32_t src, dst;
@@ -556,7 +556,7 @@ static uint32_t gen4_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen4_check_dst_format(PictFormat format)
+static bool gen4_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -571,10 +571,10 @@ static Bool gen4_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -687,13 +687,13 @@ static uint32_t gen4_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown filter: %s [%d]\n",
 		     __FUNCTION__,
 		     PictureGetFilterName(picture->filter),
 		     picture->filter));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -716,18 +716,18 @@ static uint32_t gen4_repeat(uint32_t repeat)
 static bool gen4_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown repeat: %d\n",
 		     __FUNCTION__, picture->repeatType));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -741,7 +741,7 @@ gen4_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	struct gen4_surface_state *ss;
 	uint32_t domains;
@@ -948,7 +948,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 			      const struct sna_composite_rectangles *r)
 {
 	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
-	Bool is_affine = op->is_affine;
+	bool is_affine = op->is_affine;
 	const float *src_sf = op->src.scale;
 	const float *mask_sf = op->mask.scale;
 
@@ -1274,7 +1274,7 @@ gen4_emit_invariant(struct sna *sna)
 
 	gen4_emit_state_base_address(sna);
 
-	sna->render_state.gen4.needs_invariant = FALSE;
+	sna->render_state.gen4.needs_invariant = false;
 }
 
 static void
@@ -1393,11 +1393,11 @@ gen4_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen4_render_state *render = &sna->render_state.gen4;
-	Bool has_mask = op->mask.bo != NULL;
+	bool has_mask = op->mask.bo != NULL;
 	int nelem = has_mask ? 2 : 1;
 	int selem;
 	uint32_t w_component;
@@ -1494,12 +1494,12 @@ gen4_bind_surfaces(struct sna *sna,
 		gen4_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen4_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen4_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo)
 		binding_table[2] =
 			gen4_bind_bo(sna,
@@ -1507,7 +1507,7 @@ gen4_bind_surfaces(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table &&
@@ -1676,7 +1676,7 @@ static void gen4_video_bind_surfaces(struct sna *sna,
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen4_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen4_bind_video_source(sna,
@@ -1691,7 +1691,7 @@ static void gen4_video_bind_surfaces(struct sna *sna,
 	gen4_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen4_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -1710,7 +1710,7 @@ gen4_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -1725,7 +1725,7 @@ gen4_render_video(struct sna *sna,
 	tmp.src.repeat = SAMPLER_EXTEND_PAD;
 	tmp.u.gen4.wm_kernel =
 		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.ve_id = 1;
 	tmp.priv = frame;
@@ -1792,18 +1792,18 @@ gen4_render_video(struct sna *sna,
 
 	if (sna->render_state.gen4.vertex_offset)
 		gen4_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen4_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
@@ -1816,7 +1816,7 @@ gen4_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen4_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -1946,7 +1946,7 @@ gen4_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2050,7 +2050,7 @@ gen4_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 {
 	struct sna_pixmap *priv;
@@ -2058,7 +2058,7 @@ gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 	if (!gen4_check_dst_format(dst->format)) {
 		DBG(("%s: incompatible render target format %08x\n",
 		     __FUNCTION__, dst->format));
-		return FALSE;
+		return false;
 	}
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2067,7 +2067,7 @@ gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 	op->dst.format = dst->format;
 	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	op->dst.bo = priv->gpu_bo;
 	op->damage = &priv->gpu_damage;
@@ -2079,20 +2079,20 @@ gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
-	return TRUE;
+	return true;
 }
 
-static inline Bool
+static inline bool
 picture_is_cpu(PicturePtr picture)
 {
 	if (!picture->pDrawable)
-		return FALSE;
+		return false;
 
 	/* If it is a solid, try to use the render paths */
 	if (picture->pDrawable->width == 1 &&
 	    picture->pDrawable->height == 1 &&
 	    picture->repeat)
-		return FALSE;
+		return false;
 
 	return is_cpu(picture->pDrawable);
 }
@@ -2107,20 +2107,20 @@ static inline bool prefer_blt(struct sna *sna)
 #endif
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr source,
 	int width, int height)
 {
 	if (prefer_blt(sna)) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
@@ -2133,9 +2133,9 @@ check_gradient(PicturePtr picture)
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2207,7 +2207,7 @@ gen4_composite_fallback(struct sna *sna,
 	if (!gen4_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2229,11 +2229,11 @@ gen4_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2241,18 +2241,18 @@ gen4_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2262,25 +2262,25 @@ gen4_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2291,40 +2291,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen4_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen4_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen4_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen4_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2335,10 +2335,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen4_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen4_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2354,11 +2354,11 @@ gen4_render_composite(struct sna *sna,
 	     width, height, sna->kgem.mode));
 
 	if (op >= ARRAY_SIZE(gen4_blend_op))
-		return FALSE;
+		return false;
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2374,10 +2374,10 @@ gen4_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen4_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2388,13 +2388,13 @@ gen4_render_composite(struct sna *sna,
 					    tmp);
 
 	if (!gen4_composite_set_target(dst, tmp))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height) &&
 	    !sna_render_composite_redirect(sna, tmp,
 					   dst_x, dst_y, width, height))
-		return FALSE;
+		return false;
 
 	switch (gen4_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
@@ -2414,13 +2414,13 @@ gen4_render_composite(struct sna *sna,
 
 	tmp->op = op;
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->prim_emit = gen4_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2435,7 +2435,7 @@ gen4_render_composite(struct sna *sna,
 					goto cleanup_src;
 				}
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2504,7 +2504,7 @@ gen4_render_composite(struct sna *sna,
 
 	gen4_bind_surfaces(sna, tmp);
 	gen4_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2515,7 +2515,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2532,12 +2532,12 @@ gen4_copy_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen4_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen4_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table) {
@@ -2583,7 +2583,7 @@ static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
 	(void)flags;
 }
 
-static Bool
+static bool
 gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2595,7 +2595,7 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -2611,12 +2611,12 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
 fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -2709,7 +2709,7 @@ fallback_blt:
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
@@ -2739,7 +2739,7 @@ fallback_blt:
 	} while (--n);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -2770,7 +2770,7 @@ gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen4_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen4_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -2784,7 +2784,7 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -2798,14 +2798,14 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -2856,7 +2856,7 @@ fallback:
 
 	op->blt  = gen4_render_copy_blt;
 	op->done = gen4_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -2873,12 +2873,12 @@ gen4_fill_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen4_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen4_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN4_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table) {
@@ -2912,7 +2912,7 @@ gen4_render_fill_rectangle(struct sna *sna,
 	FLUSH(op);
 }
 
-static Bool
+static bool
 gen4_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -2926,7 +2926,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen4_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -2950,10 +2950,10 @@ gen4_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen4_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -2961,7 +2961,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	}
 
 #if NO_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	if (op == PictOpClear)
@@ -2972,7 +2972,7 @@ gen4_render_fill_boxes(struct sna *sna,
 					  color->blue,
 					  color->alpha,
 					  PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s(%08x x %d)\n", __FUNCTION__, pixel, n));
 
@@ -2990,7 +2990,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
@@ -3012,7 +3012,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	} while (--n);
 
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3054,7 +3054,7 @@ gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
-static Bool
+static bool
 gen4_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -3072,7 +3072,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -3104,7 +3104,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
+	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
 	op->base.need_magic_ca_pass = 0;
 	op->base.has_component_alpha = 0;
@@ -3123,10 +3123,10 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->box   = gen4_render_fill_op_box;
 	op->boxes = gen4_render_fill_op_boxes;
 	op->done  = gen4_render_fill_op_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen4_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -3144,7 +3144,7 @@ gen4_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -3162,12 +3162,12 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	if (gen4_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
-		return FALSE;
+		return false;
 
 	if (alu == GXclear)
 		color = 0;
@@ -3192,10 +3192,10 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
@@ -3214,7 +3214,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3236,8 +3236,8 @@ discard_vbo(struct sna *sna)
 
 static void gen4_render_reset(struct sna *sna)
 {
-	sna->render_state.gen4.needs_invariant = TRUE;
-	sna->render_state.gen4.needs_urb = TRUE;
+	sna->render_state.gen4.needs_invariant = true;
+	sna->render_state.gen4.needs_urb = true;
 	sna->render_state.gen4.vb_id = 0;
 	sna->render_state.gen4.ve_id = -1;
 	sna->render_state.gen4.last_primitive = -1;
@@ -3293,7 +3293,7 @@ static uint32_t gen4_create_sf_state(struct sna_static_stream *stream,
 	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
 	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
 	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->sf5.viewport_transform = FALSE;	/* skip viewport */
+	sf_state->sf5.viewport_transform = false;	/* skip viewport */
 	sf_state->sf6.cull_mode = GEN4_CULLMODE_NONE;
 	sf_state->sf6.scissor = 0;
 	sf_state->sf7.trifan_pv = 2;
@@ -3321,7 +3321,7 @@ static uint32_t gen4_create_sampler_state(struct sna_static_stream *stream,
 }
 
 static void gen4_init_wm_state(struct gen4_wm_unit_state *state,
-			       Bool has_mask,
+			       bool has_mask,
 			       uint32_t kernel,
 			       uint32_t sampler)
 {
@@ -3415,7 +3415,7 @@ static uint32_t gen4_create_cc_unit_state(struct sna_static_stream *stream)
 	return sna_static_stream_offsetof(stream, base);
 }
 
-static Bool gen4_render_setup(struct sna *sna)
+static bool gen4_render_setup(struct sna *sna)
 {
 	struct gen4_render_state *state = &sna->render_state.gen4;
 	struct sna_static_stream general;
@@ -3490,10 +3490,10 @@ static Bool gen4_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen4_render_init(struct sna *sna)
+bool gen4_render_init(struct sna *sna)
 {
 	if (!gen4_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->render.composite = gen4_render_composite;
 	sna->render.video = gen4_render_video;
@@ -3511,5 +3511,5 @@ Bool gen4_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = GEN4_MAX_3D_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 7a20303..c4b1ecf 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -185,29 +185,29 @@ static const uint32_t ps_kernel_planar_static[][4] = {
 static const struct wm_kernel_info {
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, FALSE),
-	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, false),
+	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA,
-	       ps_kernel_maskca_srcalpha_affine, TRUE),
+	       ps_kernel_maskca_srcalpha_affine, true),
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-	       ps_kernel_maskca_srcalpha_projective, TRUE),
+	       ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, FALSE),
-	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, FALSE),
+	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
+	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen5_blend_op[] = {
@@ -278,7 +278,7 @@ static inline bool too_large(int width, int height)
 }
 
 static int
-gen5_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen5_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -311,7 +311,7 @@ static void gen5_magic_ca_pass(struct sna *sna,
 	gen5_emit_pipelined_pointers
 		(sna, op, PictOpAdd,
 		 gen5_choose_composite_kernel(PictOpAdd,
-					      TRUE, TRUE, op->is_affine));
+					      true, true, op->is_affine));
 
 	OUT_BATCH(GEN5_3DPRIMITIVE |
 		  GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -491,7 +491,7 @@ static void gen5_vertex_close(struct sna *sna)
 }
 
 static uint32_t gen5_get_blend(int op,
-			       Bool has_component_alpha,
+			       bool has_component_alpha,
 			       uint32_t dst_format)
 {
 	uint32_t src, dst;
@@ -552,7 +552,7 @@ static uint32_t gen5_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen5_check_dst_format(PictFormat format)
+static bool gen5_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -567,10 +567,10 @@ static Bool gen5_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -683,10 +683,10 @@ static uint32_t gen5_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown filter: %x\n", __FUNCTION__, picture->filter));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -709,18 +709,18 @@ static uint32_t gen5_repeat(uint32_t repeat)
 static bool gen5_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown repeat: %x\n",
 		     __FUNCTION__, picture->repeatType));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -745,7 +745,7 @@ gen5_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	uint32_t domains;
 	uint16_t offset;
@@ -951,7 +951,7 @@ gen5_emit_composite_primitive(struct sna *sna,
 			      const struct sna_composite_rectangles *r)
 {
 	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
-	Bool is_affine = op->is_affine;
+	bool is_affine = op->is_affine;
 	const float *src_sf = op->src.scale;
 	const float *mask_sf = op->mask.scale;
 
@@ -1289,7 +1289,7 @@ gen5_emit_invariant(struct sna *sna)
 
 	gen5_emit_state_base_address(sna);
 
-	sna->render_state.gen5.needs_invariant = FALSE;
+	sna->render_state.gen5.needs_invariant = false;
 }
 
 static void
@@ -1409,12 +1409,12 @@ gen5_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen5_render_state *render = &sna->render_state.gen5;
-	Bool has_mask = op->mask.bo != NULL;
-	Bool is_affine = op->is_affine;
+	bool has_mask = op->mask.bo != NULL;
+	bool is_affine = op->is_affine;
 	int nelem = has_mask ? 2 : 1;
 	int selem = is_affine ? 2 : 3;
 	uint32_t w_component;
@@ -1518,12 +1518,12 @@ static void gen5_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen5_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen5_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo)
 		binding_table[2] =
 			gen5_bind_bo(sna,
@@ -1531,7 +1531,7 @@ static void gen5_bind_surfaces(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table &&
@@ -1706,7 +1706,7 @@ static void gen5_video_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen5_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen5_bind_video_source(sna,
@@ -1721,7 +1721,7 @@ static void gen5_video_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen5_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -1740,7 +1740,7 @@ gen5_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -1758,7 +1758,7 @@ gen5_render_video(struct sna *sna,
 	tmp.u.gen5.wm_kernel =
 		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
 	tmp.u.gen5.ve_id = 1;
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.priv = frame;
@@ -1822,7 +1822,7 @@ gen5_render_video(struct sna *sna,
 	priv->clear = false;
 
 	gen5_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
 static int
@@ -1832,8 +1832,8 @@ gen5_composite_solid_init(struct sna *sna,
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
@@ -1846,7 +1846,7 @@ gen5_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen5_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -1976,7 +1976,7 @@ gen5_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2076,7 +2076,7 @@ gen5_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 {
 	struct sna_pixmap *priv;
@@ -2103,7 +2103,7 @@ gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
@@ -2115,43 +2115,43 @@ gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
-	return TRUE;
+	return true;
 }
 
-static inline Bool
+static inline bool
 picture_is_cpu(PicturePtr picture)
 {
 	if (!picture->pDrawable)
-		return FALSE;
+		return false;
 
 	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
-		return TRUE;
+		return true;
 
 	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height))
-		return TRUE;
+		return true;
 
 	/* The blitter is much faster for solids */
 	if (sna_picture_is_solid(src, NULL))
-		return TRUE;
+		return true;
 
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
 	return picture_is_cpu(src);
@@ -2161,14 +2161,14 @@ static bool
 is_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2241,7 +2241,7 @@ gen5_composite_fallback(struct sna *sna,
 	if (!gen5_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2263,11 +2263,11 @@ gen5_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2275,18 +2275,18 @@ gen5_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2296,25 +2296,25 @@ gen5_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2325,40 +2325,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen5_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen5_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen5_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen5_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2369,10 +2369,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen5_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen5_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2389,7 +2389,7 @@ gen5_render_composite(struct sna *sna,
 
 	if (op >= ARRAY_SIZE(gen5_blend_op)) {
 		DBG(("%s: unhandled blend op %d\n", __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL &&
@@ -2399,10 +2399,10 @@ gen5_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen5_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2414,7 +2414,7 @@ gen5_render_composite(struct sna *sna,
 
 	if (!gen5_composite_set_target(dst, tmp)) {
 		DBG(("%s: failed to set composite target\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
@@ -2423,14 +2423,14 @@ gen5_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height) &&
 	    !sna_render_composite_redirect(sna, tmp,
 					   dst_x, dst_y, width, height))
-		return FALSE;
+		return false;
 
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen5_composite_picture(sna, src, &tmp->src,
@@ -2451,13 +2451,13 @@ gen5_render_composite(struct sna *sna,
 
 	tmp->op = op;
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->prim_emit = gen5_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2470,7 +2470,7 @@ gen5_render_composite(struct sna *sna,
 					goto cleanup_src;
 				}
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2536,7 +2536,7 @@ gen5_render_composite(struct sna *sna,
 
 	gen5_bind_surfaces(sna, tmp);
 	gen5_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2547,12 +2547,12 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static Bool
+static bool
 gen5_composite_alpha_gradient_init(struct sna *sna,
 				   struct sna_composite_channel *channel)
 {
@@ -2560,8 +2560,8 @@ gen5_composite_alpha_gradient_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatPad;
-	channel->is_affine = TRUE;
-	channel->is_solid  = FALSE;
+	channel->is_affine = true;
+	channel->is_solid  = false;
 	channel->transform = NULL;
 	channel->width  = 256;
 	channel->height = 1;
@@ -2756,7 +2756,7 @@ gen5_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen5_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -2771,13 +2771,13 @@ gen5_render_composite_spans(struct sna *sna,
 	     width, height, flags, sna->kgem.ring));
 
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
-		return FALSE;
+		return false;
 
 	if (op >= ARRAY_SIZE(gen5_blend_op))
-		return FALSE;
+		return false;
 
 	if (gen5_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -2786,7 +2786,7 @@ gen5_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -2796,13 +2796,13 @@ gen5_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (!gen5_composite_set_target(dst, &tmp->base))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen5_composite_picture(sna, src, &tmp->base.src,
@@ -2822,8 +2822,8 @@ gen5_render_composite_spans(struct sna *sna,
 
 	tmp->base.mask.bo = NULL;
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = FALSE;
-	tmp->base.need_magic_ca_pass = FALSE;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
 
 	gen5_composite_alpha_gradient_init(sna, &tmp->base.mask);
 
@@ -2837,7 +2837,7 @@ gen5_render_composite_spans(struct sna *sna,
 
 	tmp->base.u.gen5.wm_kernel =
 		gen5_choose_composite_kernel(tmp->base.op,
-					     TRUE, FALSE,
+					     true, false,
 					     tmp->base.is_affine);
 	tmp->base.u.gen5.ve_id = 1 << 1 | tmp->base.is_affine;
 
@@ -2857,7 +2857,7 @@ gen5_render_composite_spans(struct sna *sna,
 
 	gen5_bind_surfaces(sna, &tmp->base);
 	gen5_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -2865,7 +2865,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -2884,12 +2884,12 @@ gen5_copy_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen5_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen5_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table) {
@@ -2900,7 +2900,7 @@ gen5_copy_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2914,12 +2914,12 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
 fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -3013,7 +3013,7 @@ fallback_blt:
 		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
@@ -3066,7 +3066,7 @@ fallback_blt:
 	gen5_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3115,7 +3115,7 @@ gen5_render_copy_done(struct sna *sna,
 	DBG(("%s()\n", __FUNCTION__));
 }
 
-static Bool
+static bool
 gen5_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3128,14 +3128,14 @@ gen5_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3187,7 +3187,7 @@ fallback:
 				 src_bo, dst_bo,
 				 dst->drawable.bitsPerPixel,
 				 op))
-			return TRUE;
+			return true;
 	}
 
 	gen5_copy_bind_surfaces(sna, &op->base);
@@ -3195,7 +3195,7 @@ fallback:
 
 	op->blt  = gen5_render_copy_blt;
 	op->done = gen5_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3213,12 +3213,12 @@ gen5_fill_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen5_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen5_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN5_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table) {
@@ -3239,7 +3239,7 @@ static inline bool prefer_blt_fill(struct sna *sna)
 #endif
 }
 
-static Bool
+static bool
 gen5_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -3256,7 +3256,7 @@ gen5_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen5_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -3280,10 +3280,10 @@ gen5_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen5_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -3298,7 +3298,7 @@ gen5_render_fill_boxes(struct sna *sna,
 					  color->blue,
 					  color->alpha,
 					  PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -3314,7 +3314,7 @@ gen5_render_fill_boxes(struct sna *sna,
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
@@ -3356,7 +3356,7 @@ gen5_render_fill_boxes(struct sna *sna,
 
 	gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3448,7 +3448,7 @@ gen5_render_fill_op_done(struct sna *sna,
 	DBG(("%s()\n", __FUNCTION__));
 }
 
-static Bool
+static bool
 gen5_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -3461,7 +3461,7 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -3496,7 +3496,7 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
+	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
 	op->base.floats_per_rect = 9;
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
@@ -3514,10 +3514,10 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 	op->box   = gen5_render_fill_op_box;
 	op->boxes = gen5_render_fill_op_boxes;
 	op->done  = gen5_render_fill_op_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen5_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -3535,7 +3535,7 @@ gen5_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -3553,7 +3553,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna) &&
 	    gen5_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -3584,11 +3584,11 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
@@ -3619,7 +3619,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3683,7 +3683,7 @@ gen5_render_expire(struct kgem *kgem)
 
 static void gen5_render_reset(struct sna *sna)
 {
-	sna->render_state.gen5.needs_invariant = TRUE;
+	sna->render_state.gen5.needs_invariant = true;
 	sna->render_state.gen5.vb_id = 0;
 	sna->render_state.gen5.ve_id = -1;
 	sna->render_state.gen5.last_primitive = -1;
@@ -3739,7 +3739,7 @@ static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
 	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
 	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
 	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->sf5.viewport_transform = FALSE;	/* skip viewport */
+	sf_state->sf5.viewport_transform = false;	/* skip viewport */
 	sf_state->sf6.cull_mode = GEN5_CULLMODE_NONE;
 	sf_state->sf6.scissor = 0;
 	sf_state->sf7.trifan_pv = 2;
@@ -3767,7 +3767,7 @@ static uint32_t gen5_create_sampler_state(struct sna_static_stream *stream,
 }
 
 static void gen5_init_wm_state(struct gen5_wm_unit_state *state,
-			       Bool has_mask,
+			       bool has_mask,
 			       uint32_t kernel,
 			       uint32_t sampler)
 {
@@ -3866,7 +3866,7 @@ static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream)
 	return sna_static_stream_offsetof(stream, base);
 }
 
-static Bool gen5_render_setup(struct sna *sna)
+static bool gen5_render_setup(struct sna *sna)
 {
 	struct gen5_render_state *state = &sna->render_state.gen5;
 	struct sna_static_stream general;
@@ -3941,10 +3941,10 @@ static Bool gen5_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen5_render_init(struct sna *sna)
+bool gen5_render_init(struct sna *sna)
 {
 	if (!gen5_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->kgem.context_switch = gen5_render_context_switch;
 	sna->kgem.retire = gen5_render_retire;
@@ -3969,5 +3969,5 @@ Bool gen5_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = MAX_3D_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 6d8fbfd..ccf27be 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -140,27 +140,27 @@ static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(NOMASK, ps_kernel_nomask_affine, false),
+	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, TRUE),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, TRUE),
+	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, true),
+	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, FALSE),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, FALSE),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, false),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen6_blend_op[] = {
@@ -290,7 +290,7 @@ static uint32_t gen6_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen6_check_dst_format(PictFormat format)
+static bool gen6_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -305,9 +305,9 @@ static Bool gen6_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	}
-	return FALSE;
+	return false;
 }
 
 static bool gen6_check_format(uint32_t format)
@@ -349,9 +349,9 @@ static uint32_t gen6_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -374,21 +374,21 @@ static uint32_t gen6_repeat(uint32_t repeat)
 static bool gen6_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
 static int
-gen6_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen6_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -553,7 +553,7 @@ gen6_emit_invariant(struct sna *sna)
 	gen6_emit_wm_constants(sna);
 	gen6_emit_null_depth_buffer(sna);
 
-	sna->render_state.gen6.needs_invariant = FALSE;
+	sna->render_state.gen6.needs_invariant = false;
 }
 
 static bool
@@ -608,7 +608,7 @@ gen6_emit_sampler(struct sna *sna, uint32_t state)
 }
 
 static void
-gen6_emit_sf(struct sna *sna, Bool has_mask)
+gen6_emit_sf(struct sna *sna, bool has_mask)
 {
 	int num_sf_outputs = has_mask ? 2 : 1;
 
@@ -745,8 +745,8 @@ gen6_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen6_render_state *render = &sna->render_state.gen6;
 	int nelem = op->mask.bo ? 2 : 1;
@@ -879,10 +879,10 @@ static void gen6_magic_ca_pass(struct sna *sna,
 
 	gen6_emit_flush(sna);
 
-	gen6_emit_cc(sna, PictOpAdd, TRUE, op->dst.format);
+	gen6_emit_cc(sna, PictOpAdd, true, op->dst.format);
 	gen6_emit_wm(sna,
 		     gen6_choose_composite_kernel(PictOpAdd,
-						  TRUE, TRUE,
+						  true, true,
 						  op->is_affine),
 		     3, 2);
 
@@ -1170,7 +1170,7 @@ gen6_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	uint32_t *ss;
 	uint32_t domains;
@@ -1640,12 +1640,12 @@ static void gen6_emit_composite_state(struct sna *sna,
 		gen6_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen6_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen6_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo) {
 		binding_table[2] =
 			gen6_bind_bo(sna,
@@ -1653,7 +1653,7 @@ static void gen6_emit_composite_state(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 	}
 
 	if (sna->kgem.surface == offset &&
@@ -1871,7 +1871,7 @@ static void gen6_emit_video_state(struct sna *sna,
 		gen6_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen6_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen6_bind_video_source(sna,
@@ -1886,7 +1886,7 @@ static void gen6_emit_video_state(struct sna *sna,
 	gen6_emit_state(sna, op, offset | dirty);
 }
 
-static Bool
+static bool
 gen6_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -1911,7 +1911,7 @@ gen6_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -1928,7 +1928,7 @@ gen6_render_video(struct sna *sna,
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
@@ -2004,10 +2004,10 @@ gen6_render_video(struct sna *sna,
 	priv->clear = false;
 
 	gen6_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
@@ -2016,8 +2016,8 @@ gen6_composite_solid_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->is_opaque = (color >> 24) == 0xff;
 	channel->transform = NULL;
 	channel->width  = 1;
@@ -2031,7 +2031,7 @@ gen6_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen6_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -2161,7 +2161,7 @@ gen6_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2264,7 +2264,7 @@ static void gen6_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen6_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
 			  PicturePtr dst)
@@ -2290,7 +2290,7 @@ gen6_composite_set_target(struct sna *sna,
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
@@ -2307,7 +2307,7 @@ gen6_composite_set_target(struct sna *sna,
 	     op->dst.width, op->dst.height,
 	     op->dst.bo->pitch,
 	     op->dst.x, op->dst.y));
-	return TRUE;
+	return true;
 }
 
 static bool prefer_blt_ring(struct sna *sna)
@@ -2320,27 +2320,27 @@ static bool can_switch_rings(struct sna *sna)
 	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (prefer_blt_ring(sna)) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
 		DBG(("%s: dst too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	if (src->pDrawable &&
@@ -2348,31 +2348,31 @@ try_blt(struct sna *sna,
 		DBG(("%s: src too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     src->pDrawable->width, src->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
-			return TRUE;
+			return true;
 		if (src->pDrawable)
-			return TRUE;
+			return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
 static bool
 check_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2443,7 +2443,7 @@ gen6_composite_fallback(struct sna *sna,
 	if (!gen6_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2465,11 +2465,11 @@ gen6_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2479,18 +2479,18 @@ gen6_composite_fallback(struct sna *sna,
 	     (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2500,25 +2500,25 @@ gen6_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2529,40 +2529,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen6_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen6_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen6_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen6_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2573,10 +2573,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen6_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2589,11 +2589,11 @@ gen6_render_composite(struct sna *sna,
 		      struct sna_composite_op *tmp)
 {
 	if (op >= ARRAY_SIZE(gen6_blend_op))
-		return FALSE;
+		return false;
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2612,10 +2612,10 @@ gen6_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen6_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2629,7 +2629,7 @@ gen6_render_composite(struct sna *sna,
 		op = PictOpSrc;
 	tmp->op = op;
 	if (!gen6_composite_set_target(sna, tmp, dst))
-		return FALSE;
+		return false;
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
@@ -2637,14 +2637,14 @@ gen6_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height)) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen6_composite_picture(sna, src, &tmp->src,
@@ -2672,12 +2672,12 @@ gen6_render_composite(struct sna *sna,
 		if (tmp->redirect.real_bo)
 			kgem_bo_destroy(&sna->kgem, tmp->redirect.real_bo);
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-		return TRUE;
+		return true;
 	}
 
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->mask.bo = NULL;
 	tmp->mask.filter = SAMPLER_FILTER_NEAREST;
@@ -2686,7 +2686,7 @@ gen6_render_composite(struct sna *sna,
 	tmp->prim_emit = gen6_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2697,7 +2697,7 @@ gen6_render_composite(struct sna *sna,
 				if (op != PictOpOver)
 					goto cleanup_src;
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2785,7 +2785,7 @@ gen6_render_composite(struct sna *sna,
 
 	gen6_emit_composite_state(sna, tmp);
 	gen6_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2796,12 +2796,12 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static Bool
+static bool
 gen6_composite_alpha_gradient_init(struct sna *sna,
 				   struct sna_composite_channel *channel)
 {
@@ -2809,8 +2809,8 @@ gen6_composite_alpha_gradient_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatPad;
-	channel->is_affine = TRUE;
-	channel->is_solid  = FALSE;
+	channel->is_affine = true;
+	channel->is_solid  = false;
 	channel->transform = NULL;
 	channel->width  = 256;
 	channel->height = 1;
@@ -3059,7 +3059,7 @@ gen6_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen6_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3074,13 +3074,13 @@ gen6_render_composite_spans(struct sna *sna,
 	     width, height, flags, sna->kgem.ring));
 
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
-		return FALSE;
+		return false;
 
 	if (op >= ARRAY_SIZE(gen6_blend_op))
-		return FALSE;
+		return false;
 
 	if (gen6_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -3089,7 +3089,7 @@ gen6_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -3099,13 +3099,13 @@ gen6_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (!gen6_composite_set_target(sna, &tmp->base, dst))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen6_composite_picture(sna, src, &tmp->base.src,
@@ -3126,8 +3126,8 @@ gen6_render_composite_spans(struct sna *sna,
 	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = FALSE;
-	tmp->base.need_magic_ca_pass = FALSE;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
 
 	gen6_composite_alpha_gradient_init(sna, &tmp->base.mask);
 
@@ -3150,7 +3150,7 @@ gen6_render_composite_spans(struct sna *sna,
 
 	tmp->base.u.gen6.wm_kernel =
 		gen6_choose_composite_kernel(tmp->base.op,
-					     TRUE, FALSE,
+					     true, false,
 					     tmp->base.is_affine);
 	tmp->base.u.gen6.nr_surfaces = 3;
 	tmp->base.u.gen6.nr_inputs = 2;
@@ -3174,7 +3174,7 @@ gen6_render_composite_spans(struct sna *sna,
 
 	gen6_emit_composite_state(sna, &tmp->base);
 	gen6_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -3182,7 +3182,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -3203,12 +3203,12 @@ gen6_emit_copy_state(struct sna *sna,
 		gen6_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen6_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen6_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table) {
@@ -3273,7 +3273,7 @@ overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		extents.y1 + src_dy < extents.y2 + dst_dy);
 }
 
-static Bool
+static bool
 gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -3283,7 +3283,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -3306,7 +3306,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    overlaps(src_bo, src_dx, src_dy,
@@ -3404,7 +3404,7 @@ fallback_blt:
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
@@ -3468,7 +3468,7 @@ fallback_blt:
 	gen6_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3513,7 +3513,7 @@ gen6_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen6_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen6_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3521,7 +3521,7 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -3540,14 +3540,14 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3605,7 +3605,7 @@ fallback:
 
 	op->blt  = gen6_render_copy_blt;
 	op->done = gen6_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3624,12 +3624,12 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 		gen6_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen6_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen6_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN6_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table) {
@@ -3649,7 +3649,7 @@ static inline bool prefer_blt_fill(struct sna *sna,
 		untiled_tlb_miss(bo));
 }
 
-static Bool
+static bool
 gen6_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -3667,7 +3667,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen6_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -3691,10 +3691,10 @@ gen6_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen6_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -3702,7 +3702,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	}
 
 #if NO_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	if (op == PictOpClear) {
@@ -3714,7 +3714,7 @@ gen6_render_fill_boxes(struct sna *sna,
 				     color->blue,
 				     color->alpha,
 				     PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s(%08x x %d [(%d, %d), (%d, %d) ...])\n",
 	     __FUNCTION__, pixel, n,
@@ -3737,11 +3737,11 @@ gen6_render_fill_boxes(struct sna *sna,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = FALSE;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.has_component_alpha = false;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
@@ -3781,7 +3781,7 @@ gen6_render_fill_boxes(struct sna *sna,
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3872,7 +3872,7 @@ gen6_render_op_fill_done(struct sna *sna, const struct sna_fill_op *op)
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
-static Bool
+static bool
 gen6_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -3892,7 +3892,7 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -3924,9 +3924,9 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
-	op->base.has_component_alpha = FALSE;
-	op->base.need_magic_ca_pass = FALSE;
+	op->base.is_affine = true;
+	op->base.has_component_alpha = false;
+	op->base.need_magic_ca_pass = false;
 	op->base.floats_per_vertex = 3;
 	op->base.floats_per_rect = 9;
 
@@ -3947,10 +3947,10 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	op->box  = gen6_render_op_fill_box;
 	op->boxes = gen6_render_op_fill_boxes;
 	op->done = gen6_render_op_fill_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -3968,7 +3968,7 @@ gen6_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -3986,7 +3986,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna, bo) &&
 	    gen6_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4017,11 +4017,11 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
@@ -4054,10 +4054,10 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	BoxRec box;
@@ -4072,7 +4072,7 @@ gen6_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 				  0, &box, 1);
 }
 
-static Bool
+static bool
 gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	struct sna_composite_op tmp;
@@ -4089,7 +4089,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	/* Prefer to use the BLT if, and only if, already engaged */
 	if (sna->kgem.ring == KGEM_BLT &&
 	    gen6_render_clear_try_blt(sna, dst, bo))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height))
@@ -4112,11 +4112,11 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
@@ -4148,7 +4148,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void gen6_render_flush(struct sna *sna)
@@ -4206,7 +4206,7 @@ gen6_render_expire(struct kgem *kgem)
 
 static void gen6_render_reset(struct sna *sna)
 {
-	sna->render_state.gen6.needs_invariant = TRUE;
+	sna->render_state.gen6.needs_invariant = true;
 	sna->render_state.gen6.first_state_packet = true;
 	sna->render_state.gen6.vb_id = 0;
 	sna->render_state.gen6.ve_id = -1;
@@ -4226,7 +4226,7 @@ static void gen6_render_fini(struct sna *sna)
 	kgem_bo_destroy(&sna->kgem, sna->render_state.gen6.general_bo);
 }
 
-static Bool gen6_render_setup(struct sna *sna)
+static bool gen6_render_setup(struct sna *sna)
 {
 	struct gen6_render_state *state = &sna->render_state.gen6;
 	struct sna_static_stream general;
@@ -4272,10 +4272,10 @@ static Bool gen6_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen6_render_init(struct sna *sna)
+bool gen6_render_init(struct sna *sna)
 {
 	if (!gen6_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->kgem.context_switch = gen6_render_context_switch;
 	sna->kgem.retire = gen6_render_retire;
@@ -4301,5 +4301,5 @@ Bool gen6_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = GEN6_MAX_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index afb4b9b..e76acd8 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -173,27 +173,27 @@ static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(NOMASK, ps_kernel_nomask_affine, false),
+	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, TRUE),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, TRUE),
+	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, true),
+	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, FALSE),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, FALSE),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, false),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen7_blend_op[] = {
@@ -261,7 +261,7 @@ static inline bool too_large(int width, int height)
 }
 
 static uint32_t gen7_get_blend(int op,
-			       Bool has_component_alpha,
+			       bool has_component_alpha,
 			       uint32_t dst_format)
 {
 	uint32_t src, dst;
@@ -323,7 +323,7 @@ static uint32_t gen7_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen7_check_dst_format(PictFormat format)
+static bool gen7_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -338,9 +338,9 @@ static Bool gen7_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	}
-	return FALSE;
+	return false;
 }
 
 static bool gen7_check_format(uint32_t format)
@@ -382,9 +382,9 @@ static uint32_t gen7_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -407,21 +407,21 @@ static uint32_t gen7_repeat(uint32_t repeat)
 static bool gen7_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
 static int
-gen7_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen7_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -736,7 +736,7 @@ gen7_emit_invariant(struct sna *sna)
 	gen7_disable_streamout(sna);
 	gen7_emit_null_depth_buffer(sna);
 
-	sna->render_state.gen7.needs_invariant = FALSE;
+	sna->render_state.gen7.needs_invariant = false;
 }
 
 static void
@@ -777,7 +777,7 @@ gen7_emit_sampler(struct sna *sna, uint32_t state)
 }
 
 static void
-gen7_emit_sf(struct sna *sna, Bool has_mask)
+gen7_emit_sf(struct sna *sna, bool has_mask)
 {
 	int num_sf_outputs = has_mask ? 2 : 1;
 
@@ -876,8 +876,8 @@ gen7_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen7_render_state *render = &sna->render_state.gen7;
 	int nelem = op->mask.bo ? 2 : 1;
@@ -1039,10 +1039,10 @@ static void gen7_magic_ca_pass(struct sna *sna,
 
 	gen7_emit_pipe_invalidate(sna, true);
 
-	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, TRUE, op->dst.format));
+	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, true, op->dst.format));
 	gen7_emit_wm(sna,
 		     gen7_choose_composite_kernel(PictOpAdd,
-						  TRUE, TRUE,
+						  true, true,
 						  op->is_affine),
 		     3, 2);
 
@@ -1311,7 +1311,7 @@ gen7_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	uint32_t *ss;
 	uint32_t domains;
@@ -1763,12 +1763,12 @@ static void gen7_emit_composite_state(struct sna *sna,
 		gen7_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen7_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen7_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo) {
 		binding_table[2] =
 			gen7_bind_bo(sna,
@@ -1776,7 +1776,7 @@ static void gen7_emit_composite_state(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 	}
 
 	if (sna->kgem.surface == offset &&
@@ -1990,7 +1990,7 @@ static void gen7_emit_video_state(struct sna *sna,
 		gen7_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen7_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen7_bind_video_source(sna,
@@ -2005,7 +2005,7 @@ static void gen7_emit_video_state(struct sna *sna,
 	gen7_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen7_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -2030,7 +2030,7 @@ gen7_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -2047,7 +2047,7 @@ gen7_render_video(struct sna *sna,
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
@@ -2123,10 +2123,10 @@ gen7_render_video(struct sna *sna,
 	priv->clear = false;
 
 	gen7_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
@@ -2135,8 +2135,8 @@ gen7_composite_solid_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->is_opaque = (color >> 24) == 0xff;
 	channel->transform = NULL;
 	channel->width  = 1;
@@ -2150,7 +2150,7 @@ gen7_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen7_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -2280,7 +2280,7 @@ gen7_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2381,7 +2381,7 @@ static void gen7_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PicturePtr dst)
 {
 	struct sna_pixmap *priv;
@@ -2405,7 +2405,7 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
@@ -2422,7 +2422,7 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	     op->dst.width, op->dst.height,
 	     op->dst.bo->pitch,
 	     op->dst.x, op->dst.y));
-	return TRUE;
+	return true;
 }
 
 inline static bool can_switch_rings(struct sna *sna)
@@ -2435,42 +2435,42 @@ inline static bool prefer_blt_ring(struct sna *sna)
 	return sna->kgem.ring != KGEM_RENDER || can_switch_rings(sna);
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (sna->kgem.ring == KGEM_BLT) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
-			return TRUE;
+			return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
 static bool
 check_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2541,7 +2541,7 @@ gen7_composite_fallback(struct sna *sna,
 	if (!gen7_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2563,11 +2563,11 @@ gen7_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2577,18 +2577,18 @@ gen7_composite_fallback(struct sna *sna,
 	     (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2598,25 +2598,25 @@ gen7_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2627,40 +2627,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen7_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen7_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen7_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen7_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2671,10 +2671,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen7_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2687,11 +2687,11 @@ gen7_render_composite(struct sna *sna,
 		      struct sna_composite_op *tmp)
 {
 	if (op >= ARRAY_SIZE(gen7_blend_op))
-		return FALSE;
+		return false;
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2710,10 +2710,10 @@ gen7_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen7_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2727,7 +2727,7 @@ gen7_render_composite(struct sna *sna,
 		op = PictOpSrc;
 	tmp->op = op;
 	if (!gen7_composite_set_target(sna, tmp, dst))
-		return FALSE;
+		return false;
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
@@ -2735,14 +2735,14 @@ gen7_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height)) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen7_composite_picture(sna, src, &tmp->src,
@@ -2770,12 +2770,12 @@ gen7_render_composite(struct sna *sna,
 		if (tmp->redirect.real_bo)
 			kgem_bo_destroy(&sna->kgem, tmp->redirect.real_bo);
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-		return TRUE;
+		return true;
 	}
 
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->mask.bo = NULL;
 	tmp->mask.filter = SAMPLER_FILTER_NEAREST;
@@ -2784,7 +2784,7 @@ gen7_render_composite(struct sna *sna,
 	tmp->prim_emit = gen7_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2795,7 +2795,7 @@ gen7_render_composite(struct sna *sna,
 				if (op != PictOpOver)
 					goto cleanup_src;
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2874,7 +2874,7 @@ gen7_render_composite(struct sna *sna,
 
 	gen7_emit_composite_state(sna, tmp);
 	gen7_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2885,12 +2885,12 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static Bool
+static bool
 gen7_composite_alpha_gradient_init(struct sna *sna,
 				   struct sna_composite_channel *channel)
 {
@@ -2898,8 +2898,8 @@ gen7_composite_alpha_gradient_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatPad;
-	channel->is_affine = TRUE;
-	channel->is_solid  = FALSE;
+	channel->is_affine = true;
+	channel->is_solid  = false;
 	channel->transform = NULL;
 	channel->width  = 256;
 	channel->height = 1;
@@ -3148,7 +3148,7 @@ gen7_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen7_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3163,10 +3163,10 @@ gen7_render_composite_spans(struct sna *sna,
 	     width, height, flags, sna->kgem.ring));
 
 	if (op >= ARRAY_SIZE(gen7_blend_op))
-		return FALSE;
+		return false;
 
 	if (gen7_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -3175,7 +3175,7 @@ gen7_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -3185,13 +3185,13 @@ gen7_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (!gen7_composite_set_target(sna, &tmp->base, dst))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen7_composite_picture(sna, src, &tmp->base.src,
@@ -3212,8 +3212,8 @@ gen7_render_composite_spans(struct sna *sna,
 	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = FALSE;
-	tmp->base.need_magic_ca_pass = FALSE;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
 
 	gen7_composite_alpha_gradient_init(sna, &tmp->base.mask);
 
@@ -3236,7 +3236,7 @@ gen7_render_composite_spans(struct sna *sna,
 
 	tmp->base.u.gen7.wm_kernel =
 		gen7_choose_composite_kernel(tmp->base.op,
-					     TRUE, FALSE,
+					     true, false,
 					     tmp->base.is_affine);
 	tmp->base.u.gen7.nr_surfaces = 3;
 	tmp->base.u.gen7.nr_inputs = 2;
@@ -3260,7 +3260,7 @@ gen7_render_composite_spans(struct sna *sna,
 
 	gen7_emit_composite_state(sna, &tmp->base);
 	gen7_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -3268,7 +3268,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -3287,12 +3287,12 @@ gen7_emit_copy_state(struct sna *sna,
 		gen7_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen7_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen7_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table) {
@@ -3357,7 +3357,7 @@ overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		extents.y1 + src_dy < extents.y2 + dst_dy);
 }
 
-static Bool
+static bool
 gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -3367,7 +3367,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -3390,7 +3390,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if ((too_large(dst->drawable.width, dst->drawable.height) ||
 	     too_large(src->drawable.width, src->drawable.height)) &&
@@ -3415,7 +3415,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 				       dst_bo, dst_dx, dst_dy,
 				       dst->drawable.bitsPerPixel,
 				       box, n))
-			return TRUE;
+			return true;
 	}
 
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -3514,7 +3514,7 @@ fallback_blt:
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
@@ -3577,7 +3577,7 @@ fallback_blt:
 	gen7_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3620,7 +3620,7 @@ gen7_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen7_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen7_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3628,7 +3628,7 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -3647,14 +3647,14 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3712,7 +3712,7 @@ fallback:
 
 	op->blt  = gen7_render_copy_blt;
 	op->done = gen7_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3735,12 +3735,12 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 		gen7_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen7_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen7_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN7_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table) {
@@ -3758,7 +3758,7 @@ static inline bool prefer_blt_fill(struct sna *sna,
 	return prefer_blt_ring(sna) || untiled_tlb_miss(bo);
 }
 
-static Bool
+static bool
 gen7_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -3776,7 +3776,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen7_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -3800,10 +3800,10 @@ gen7_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen7_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -3811,7 +3811,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	}
 
 #if NO_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	if (op == PictOpClear) {
@@ -3823,7 +3823,7 @@ gen7_render_fill_boxes(struct sna *sna,
 				     color->blue,
 				     color->alpha,
 				     PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s(%08x x %d [(%d, %d), (%d, %d) ...])\n",
 	     __FUNCTION__, pixel, n,
@@ -3846,11 +3846,11 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = FALSE;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.has_component_alpha = false;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -3893,7 +3893,7 @@ gen7_render_fill_boxes(struct sna *sna,
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3982,7 +3982,7 @@ gen7_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
-static Bool
+static bool
 gen7_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -4002,7 +4002,7 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -4034,9 +4034,9 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
-	op->base.has_component_alpha = FALSE;
-	op->base.need_magic_ca_pass = FALSE;
+	op->base.is_affine = true;
+	op->base.has_component_alpha = false;
+	op->base.need_magic_ca_pass = false;
 	op->base.floats_per_vertex = 3;
 	op->base.floats_per_rect = 9;
 
@@ -4057,10 +4057,10 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	op->box   = gen7_render_fill_op_box;
 	op->boxes = gen7_render_fill_op_boxes;
 	op->done  = gen7_render_fill_op_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -4078,7 +4078,7 @@ gen7_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -4096,7 +4096,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna, bo) &&
 	    gen7_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4127,11 +4127,11 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -4164,10 +4164,10 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	BoxRec box;
@@ -4182,7 +4182,7 @@ gen7_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 				  0, &box, 1);
 }
 
-static Bool
+static bool
 gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	struct sna_composite_op tmp;
@@ -4199,7 +4199,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	/* Prefer to use the BLT if already engaged */
 	if (sna->kgem.ring == KGEM_BLT &&
 	    gen7_render_clear_try_blt(sna, dst, bo))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height))
@@ -4222,11 +4222,11 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -4258,7 +4258,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void gen7_render_flush(struct sna *sna)
@@ -4317,7 +4317,7 @@ gen7_render_expire(struct kgem *kgem)
 
 static void gen7_render_reset(struct sna *sna)
 {
-	sna->render_state.gen7.needs_invariant = TRUE;
+	sna->render_state.gen7.needs_invariant = true;
 	sna->render_state.gen7.vb_id = 0;
 	sna->render_state.gen7.ve_id = -1;
 	sna->render_state.gen7.last_primitive = -1;
@@ -4336,7 +4336,7 @@ static void gen7_render_fini(struct sna *sna)
 	kgem_bo_destroy(&sna->kgem, sna->render_state.gen7.general_bo);
 }
 
-static Bool gen7_render_setup(struct sna *sna)
+static bool gen7_render_setup(struct sna *sna)
 {
 	struct gen7_render_state *state = &sna->render_state.gen7;
 	struct sna_static_stream general;
@@ -4385,10 +4385,10 @@ static Bool gen7_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen7_render_init(struct sna *sna)
+bool gen7_render_init(struct sna *sna)
 {
 	if (!gen7_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->kgem.context_switch = gen7_render_context_switch;
 	sna->kgem.retire = gen7_render_retire;
@@ -4414,5 +4414,5 @@ Bool gen7_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = GEN7_MAX_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
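
The gen7_render.c changes above are largely a mechanical substitution of the X server's Bool/TRUE/FALSE for C99 bool/true/false in the SNA-internal interfaces. A minimal standalone sketch of the resulting idiom follows; it is not part of the patch, and the format codes are illustrative placeholders rather than the driver's PICT_* values:

#include <stdbool.h>
#include <stdio.h>

enum fake_format { FMT_A8R8G8B8, FMT_X8R8G8B8, FMT_A8, FMT_YUY2 };

/* bool/true/false from <stdbool.h> instead of Bool/TRUE/FALSE */
static bool check_dst_format(enum fake_format format)
{
	switch (format) {
	case FMT_A8R8G8B8:
	case FMT_X8R8G8B8:
	case FMT_A8:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("a8 supported: %d, yuy2 supported: %d\n",
	       check_dst_format(FMT_A8), check_dst_format(FMT_YUY2));
	return 0;
}
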
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bea7aea..e59811f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -359,7 +359,7 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 	bo->domain = DOMAIN_NONE;
 }
 
-Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
+bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 		   const void *data, int length)
 {
 	assert(bo->refcnt);
@@ -369,11 +369,11 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 	assert(length <= bytes(bo));
 	if (gem_write(kgem->fd, bo->handle, 0, length, data))
-		return FALSE;
+		return false;
 
 	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
 	kgem_bo_retire(kgem, bo);
-	return TRUE;
+	return true;
 }
 
 static uint32_t gem_create(int fd, int num_pages)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 273240f..ba110b6 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -406,7 +406,7 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
-Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
+bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 		   const void *data, int length);
 
 int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
@@ -513,7 +513,7 @@ static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 {
 	if (bo == NULL)
-		return FALSE;
+		return false;
 
 	return bo->dirty;
 }
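
kgem_bo_write() now reports success as a C99 bool in both the definition and the declaration above. Any caller that still stores the result in an Xlib-style Bool keeps working, since bool converts to the integers 0 and 1. A standalone sketch of that boundary, where Bool is a local stand-in typedef rather than the server's:

#include <stdbool.h>
#include <stdio.h>

typedef int Bool;		/* stand-in for the X11 typedef */
#define TRUE 1
#define FALSE 0

static bool bo_write_ok(int length) { return length <= 4096; }

int main(void)
{
	Bool legacy = bo_write_ok(128);	/* implicit bool -> Bool, always 0 or 1 */
	printf("%s\n", legacy == TRUE ? "ok" : "failed");
	return 0;
}
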
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6920343..d7fa71b 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -261,7 +261,7 @@ struct sna {
 	OptionInfoPtr Options;
 
 	/* Driver phase/state information */
-	Bool suspended;
+	bool suspended;
 
 #if HAVE_UDEV
 	struct udev_monitor *uevent_monitor;
@@ -281,7 +281,7 @@ struct sna {
 #endif
 };
 
-Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
+bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
 void sna_mode_adjust_frame(struct sna *sna, int x, int y);
 extern void sna_mode_update(struct sna *sna);
 extern void sna_mode_disable_unused(struct sna *sna);
@@ -343,7 +343,7 @@ extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 				  xf86CrtcPtr crtc, const BoxRec *clip);
 
 #if HAVE_DRI2_H
-Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
+bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
 void sna_dri_page_flip_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_destroy_window(WindowPtr win);
@@ -478,7 +478,7 @@ static inline struct kgem_bo *sna_pixmap_pin(PixmapPtr pixmap)
 }
 
 
-static inline Bool
+static inline bool
 _sna_transform_point(const PictTransform *transform,
 		     int64_t x, int64_t y, int64_t result[3])
 {
@@ -515,10 +515,10 @@ sna_get_transformed_coordinates_3d(int x, int y,
 				   const PictTransform *transform,
 				   float *x_out, float *y_out, float *z_out);
 
-Bool sna_transform_is_affine(const PictTransform *t);
-Bool sna_transform_is_integer_translation(const PictTransform *t,
+bool sna_transform_is_affine(const PictTransform *t);
+bool sna_transform_is_integer_translation(const PictTransform *t,
 					  int16_t *tx, int16_t *ty);
-Bool sna_transform_is_translation(const PictTransform *t,
+bool sna_transform_is_translation(const PictTransform *t,
 				  pixman_fixed_t *tx, pixman_fixed_t *ty);
 
 static inline bool
@@ -564,10 +564,10 @@ void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
 
-Bool sna_accel_create(struct sna *sna);
+bool sna_accel_create(struct sna *sna);
 void sna_copy_fbcon(struct sna *sna);
 
-Bool sna_composite_create(struct sna *sna);
+bool sna_composite_create(struct sna *sna);
 void sna_composite_close(struct sna *sna);
 
 void sna_composite(CARD8 op,
@@ -612,7 +612,7 @@ void sna_composite_trifan(CARD8 op,
 			  INT16 xSrc, INT16 ySrc,
 			  int npoints, xPointFixed *points);
 
-Bool sna_gradients_create(struct sna *sna);
+bool sna_gradients_create(struct sna *sna);
 void sna_gradients_close(struct sna *sna);
 
 bool sna_glyphs_create(struct sna *sna);
@@ -651,14 +651,14 @@ struct kgem_bo *sna_replace__xor(struct sna *sna,
 				 const void *src, int stride,
 				 uint32_t and, uint32_t or);
 
-Bool
+bool
 sna_compute_composite_extents(BoxPtr extents,
 			      PicturePtr src, PicturePtr mask, PicturePtr dst,
 			      INT16 src_x,  INT16 src_y,
 			      INT16 mask_x, INT16 mask_y,
 			      INT16 dst_x,  INT16 dst_y,
 			      CARD16 width, CARD16 height);
-Bool
+bool
 sna_compute_composite_region(RegionPtr region,
 			     PicturePtr src, PicturePtr mask, PicturePtr dst,
 			     INT16 src_x,  INT16 src_y,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f4921b2..406cbfa 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -353,7 +353,7 @@ sna_fill_init_blt(struct sna_fill_op *fill,
 	return sna->render.fill(sna, alu, pixmap, bo, pixel, fill);
 }
 
-static Bool
+static bool
 sna_copy_init_blt(struct sna_copy_op *copy,
 		  struct sna *sna,
 		  PixmapPtr src, struct kgem_bo *src_bo,
@@ -1077,11 +1077,11 @@ static inline bool use_cpu_bo_for_read(struct sna_pixmap *priv)
 {
 #if 0
 	if (pixmap->devPrivate.ptr == NULL)
-		return TRUE;
+		return true;
 #endif
 
 	if (priv->cpu_bo == NULL)
-		return FALSE;
+		return false;
 
 	return kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo);
 }
@@ -1284,7 +1284,7 @@ skip_inplace_map:
 
 		n = sna_damage_get_boxes(priv->gpu_damage, &box);
 		if (n) {
-			Bool ok = FALSE;
+			bool ok = false;
 
 			if (use_cpu_bo_for_write(sna, priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
@@ -1332,7 +1332,7 @@ done:
 	return true;
 }
 
-static Bool
+static bool
 region_subsumes_drawable(RegionPtr region, DrawablePtr drawable)
 {
 	const BoxRec *extents;
@@ -1738,13 +1738,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		BoxPtr box;
 		int n = sna_damage_get_boxes(priv->gpu_damage, &box);
 		if (n) {
-			Bool ok;
+			bool ok;
 
 			DBG(("%s: forced migration\n", __FUNCTION__));
 
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
-			ok = FALSE;
+			ok = false;
 			if (use_cpu_bo_for_write(sna, priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1851,7 +1851,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				n = sna_damage_get_boxes(priv->gpu_damage,
 							 &box);
 				if (n) {
-					Bool ok = FALSE;
+					bool ok = false;
 
 					if (use_cpu_bo_for_write(sna, priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
@@ -1873,7 +1873,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 								      &r->extents)) {
 				BoxPtr box = REGION_RECTS(r);
 				int n = REGION_NUM_RECTS(r);
-				Bool ok = FALSE;
+				bool ok = false;
 
 				DBG(("%s: region wholly inside damage\n",
 				     __FUNCTION__));
@@ -1898,7 +1898,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (sna_damage_intersect(priv->gpu_damage, r, &need)) {
 					BoxPtr box = REGION_RECTS(&need);
 					int n = REGION_NUM_RECTS(&need);
-					Bool ok = FALSE;
+					bool ok = false;
 
 					DBG(("%s: region intersects damage\n",
 					     __FUNCTION__));
@@ -2160,7 +2160,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 		n = sna_damage_get_boxes(priv->cpu_damage, (BoxPtr *)&box);
 		if (n) {
-			Bool ok = FALSE;
+			bool ok = false;
 
 			if (use_cpu_bo_for_read(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
@@ -2199,7 +2199,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		priv->undamaged = true;
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
-		Bool ok = FALSE;
+		bool ok = false;
 		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2225,10 +2225,10 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		priv->undamaged = true;
 	} else if (sna_damage_intersect(priv->cpu_damage, &r, &i)) {
 		int n = REGION_NUM_RECTS(&i);
-		Bool ok;
+		bool ok;
 
 		box = REGION_RECTS(&i);
-		ok = FALSE;
+		ok = false;
 		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2664,12 +2664,12 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	n = sna_damage_get_boxes(priv->cpu_damage, &box);
 	if (n) {
-		Bool ok;
+		bool ok;
 
 		assert(pixmap_contains_damage(pixmap, priv->cpu_damage));
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
-		ok = FALSE;
+		ok = false;
 		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2937,7 +2937,7 @@ static inline void box32_add_rect(Box32Rec *box, const xRectangle *r)
 		box->y2 = v;
 }
 
-static Bool
+static bool
 sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			 int x, int y, int w, int h, char *bits, int stride)
 {
@@ -2956,11 +2956,11 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	     box->x1, box->y1, box->x2, box->y2));
 
 	if (gc->alu != GXcopy)
-		return FALSE;
+		return false;
 
 	if (priv->gpu_bo == NULL &&
 	    !sna_pixmap_create_mappable_gpu(pixmap))
-		return FALSE;
+		return false;
 
 	assert(priv->gpu_bo);
 	assert(priv->gpu_bo->proxy == NULL);
@@ -3023,7 +3023,7 @@ static bool upload_inplace(struct sna *sna,
 	return false;
 }
 
-static Bool
+static bool
 sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		    int x, int y, int w, int  h, char *bits, int stride)
 {
@@ -3286,7 +3286,7 @@ static inline uint8_t blt_depth(int depth)
 	}
 }
 
-static Bool
+static bool
 sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		     int x, int y, int w, int  h, char *bits)
 {
@@ -3409,7 +3409,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	return true;
 }
 
-static Bool
+static bool
 sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		     int x, int y, int w, int  h, int left,char *bits)
 {
@@ -3825,7 +3825,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	int n = RegionNumRects(region);
 	int stride, bpp;
 	char *bits;
-	Bool replaces;
+	bool replaces;
 
 	if (n == 0)
 		return;
@@ -4482,7 +4482,7 @@ out:
 			   0, NULL);
 }
 
-inline static Bool
+inline static bool
 box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
@@ -4875,7 +4875,7 @@ sna_fill_spans__dash_clip_boxes(DrawablePtr drawable,
 		sna_fill_spans__fill_clip_boxes(drawable, gc, n, pt, width, sorted);
 }
 
-static Bool
+static bool
 sna_fill_spans_blt(DrawablePtr drawable,
 		   struct kgem_bo *bo, struct sna_damage **damage,
 		   GCPtr gc, uint32_t pixel,
@@ -4971,7 +4971,7 @@ no_damage_clipped:
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 
 		assert(dx + clip.extents.x1 >= 0);
 		assert(dy + clip.extents.y1 >= 0);
@@ -5072,7 +5072,7 @@ damage_clipped:
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 
 		assert(dx + clip.extents.x1 >= 0);
 		assert(dy + clip.extents.y1 >= 0);
@@ -5176,10 +5176,10 @@ damage_clipped:
 done:
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			     struct kgem_bo *bo,
 			     struct sna_damage **damage,
@@ -5952,7 +5952,7 @@ empty:
 				 dst_x, dst_y, bit);
 }
 
-static Bool
+static bool
 sna_poly_point_blt(DrawablePtr drawable,
 		   struct kgem_bo *bo,
 		   struct sna_damage **damage,
@@ -5970,7 +5970,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 	     __FUNCTION__, gc->alu, gc->fgPixel, clipped));
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -6045,7 +6045,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 	}
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static unsigned
@@ -6183,7 +6183,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, pixel=%lx, n=%d, clipped=%d, damage=%p\n",
 	     __FUNCTION__, gc->alu, gc->fgPixel, _n, clipped, damage));
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -6191,7 +6191,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 	if (clipped) {
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 	}
 
 	jump = _jump[(damage != NULL) | !!(dx|dy) << 1];
@@ -6536,7 +6536,7 @@ damage_offset:
 	goto *ret;
 }
 
-static Bool
+static bool
 sna_poly_line_blt(DrawablePtr drawable,
 		  struct kgem_bo *bo,
 		  struct sna_damage **damage,
@@ -6554,7 +6554,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, fg=%08x\n", __FUNCTION__, gc->alu, (unsigned)pixel));
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -6616,7 +6616,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 
 		last.x = pt->x + drawable->x;
 		last.y = pt->y + drawable->y;
@@ -6750,7 +6750,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 	}
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static unsigned
@@ -7217,7 +7217,7 @@ static inline void box_from_seg(BoxPtr b, xSegment *seg, GCPtr gc)
 	     b->x1, b->y1, b->x2, b->y2));
 }
 
-static Bool
+static bool
 sna_poly_segment_blt(DrawablePtr drawable,
 		     struct kgem_bo *bo,
 		     struct sna_damage **damage,
@@ -7235,7 +7235,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 	     __FUNCTION__, n, gc->alu, gc->fgPixel, clipped));
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -7357,7 +7357,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 done:
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static bool
@@ -7389,7 +7389,7 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, pixel=%lx, n=%d, clipped=%d, damage=%p\n",
 	     __FUNCTION__, gc->alu, gc->fgPixel, _n, clipped, damage));
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -7397,7 +7397,7 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 	if (clipped) {
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 	}
 	DBG(("%s: [clipped] extents=(%d, %d), (%d, %d), delta=(%d, %d)\n",
 	     __FUNCTION__,
@@ -8060,7 +8060,7 @@ sna_poly_rectangle_extents(DrawablePtr drawable, GCPtr gc,
 	return 1 | clipped << 1;
 }
 
-static Bool
+static bool
 sna_poly_rectangle_blt(DrawablePtr drawable,
 		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
@@ -8082,7 +8082,7 @@ sna_poly_rectangle_blt(DrawablePtr drawable,
 	DBG(("%s: n=%d, alu=%d, width=%d, fg=%08lx, damge=%p, clipped?=%d\n",
 	     __FUNCTION__, n, gc->alu, gc->lineWidth, gc->fgPixel, damage, clipped));
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -8534,7 +8534,7 @@ done:
 	}
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -8804,7 +8804,7 @@ out:
 	RegionUninit(&data.region);
 }
 
-static Bool
+static bool
 sna_poly_fill_rect_blt(DrawablePtr drawable,
 		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
@@ -8873,7 +8873,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel)) {
 		DBG(("%s: unsupported blt\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -8995,7 +8995,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 done:
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static uint32_t
@@ -9192,7 +9192,7 @@ sna_pixmap_get_source_bo(PixmapPtr pixmap)
 	return kgem_bo_reference(priv->gpu_bo);
 }
 
-static Bool
+static bool
 sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			     struct kgem_bo *bo,
 			     struct sna_damage **damage,
@@ -9232,13 +9232,13 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	if (tile_bo == NULL) {
 		DBG(("%s: unable to move tile go GPU, fallback\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!sna_copy_init_blt(&copy, sna, tile, tile_bo, pixmap, bo, alu)) {
 		DBG(("%s: unsupported blt\n", __FUNCTION__));
 		kgem_bo_destroy(&sna->kgem, tile_bo);
-		return FALSE;
+		return false;
 	}
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -9434,7 +9434,7 @@ done:
 	copy.done(sna, &copy);
 	assert_pixmap_damage(pixmap);
 	kgem_bo_destroy(&sna->kgem, tile_bo);
-	return TRUE;
+	return true;
 }
 
 static bool
@@ -12132,7 +12132,7 @@ static int sna_create_gc(GCPtr gc)
 
 	gc->funcs = (GCFuncs *)&sna_gc_funcs;
 	gc->ops = (GCOps *)&sna_gc_ops;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -12257,7 +12257,7 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask)
 		ret &= sna_validate_pixmap(&win->drawable, win->background.pixmap);
 	}
 
-	if (mask & CWBorderPixmap && win->borderIsPixel == FALSE) {
+	if (mask & CWBorderPixmap && win->borderIsPixel == false) {
 		DBG(("%s: flushing border pixmap\n", __FUNCTION__));
 		ret &= sna_validate_pixmap(&win->drawable, win->border.pixmap);
 	}
@@ -12881,21 +12881,21 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 		   "SNA initialized with %s backend\n",
 		   backend);
 
-	return TRUE;
+	return true;
 }
 
-Bool sna_accel_create(struct sna *sna)
+bool sna_accel_create(struct sna *sna)
 {
 	if (!sna_glyphs_create(sna))
-		return FALSE;
+		return false;
 
 	if (!sna_gradients_create(sna))
-		return FALSE;
+		return false;
 
 	if (!sna_composite_create(sna))
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
 void sna_accel_watch_flush(struct sna *sna, int enable)
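
The sna_accel.c hunks above repeatedly convert the migration helpers' "Bool ok = FALSE" into "bool ok = false"; the shape of that code is to attempt the copy through sna->render.copy_boxes() and, if that refuses, fall back to a slower path. A standalone toy of the pattern, with stubs standing in for the driver routines:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for sna->render.copy_boxes() and the fallback path. */
static bool gpu_copy_boxes(int nbox) { return nbox <= 16; }
static void slow_copy_boxes(int nbox) { printf("fallback: %d boxes\n", nbox); }

static void flush_damage(int nbox)
{
	bool ok = false;	/* was: Bool ok = FALSE; */

	if (nbox > 0)
		ok = gpu_copy_boxes(nbox);
	if (!ok)
		slow_copy_boxes(nbox);
}

int main(void)
{
	flush_damage(4);	/* accelerated path */
	flush_damage(64);	/* takes the fallback */
	return 0;
}
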
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 1d2678a..80fad6d 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -184,7 +184,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 		sna->blt_state.fill_alu = alu;
 	}
 
-	return TRUE;
+	return true;
 }
 
 noinline static void sna_blt_fill_begin(struct sna *sna,
@@ -242,7 +242,7 @@ inline static void sna_blt_fill_one(struct sna *sna,
 	b[2] = b[1] + (height << 16 | width);
 }
 
-static Bool sna_blt_copy_init(struct sna *sna,
+static bool sna_blt_copy_init(struct sna *sna,
 			      struct sna_blt_state *blt,
 			      struct kgem_bo *src,
 			      struct kgem_bo *dst,
@@ -288,15 +288,15 @@ static Bool sna_blt_copy_init(struct sna *sna,
 	if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
 		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
-			return FALSE;
+			return false;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
 	sna->blt_state.fill_bo = 0;
-	return TRUE;
+	return true;
 }
 
-static Bool sna_blt_alpha_fixup_init(struct sna *sna,
+static bool sna_blt_alpha_fixup_init(struct sna *sna,
 				     struct sna_blt_state *blt,
 				     struct kgem_bo *src,
 				     struct kgem_bo *dst,
@@ -340,12 +340,12 @@ static Bool sna_blt_alpha_fixup_init(struct sna *sna,
 	if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
 		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
-			return FALSE;
+			return false;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
 	sna->blt_state.fill_bo = 0;
-	return TRUE;
+	return true;
 }
 
 static void sna_blt_alpha_fixup_one(struct sna *sna,
@@ -468,7 +468,7 @@ static void sna_blt_copy_one(struct sna *sna,
 	kgem->nbatch += 8;
 }
 
-Bool
+bool
 sna_get_rgba_from_pixel(uint32_t pixel,
 			uint16_t *red,
 			uint16_t *green,
@@ -504,7 +504,7 @@ sna_get_rgba_from_pixel(uint32_t pixel,
 		gshift = rshift + rbits;
 		bshift = gshift + gbits;
 	} else {
-		return FALSE;
+		return false;
 	}
 
 	if (rbits) {
@@ -543,10 +543,10 @@ sna_get_rgba_from_pixel(uint32_t pixel,
 	} else
 		*alpha = 0xffff;
 
-	return TRUE;
+	return true;
 }
 
-Bool
+bool
 _sna_get_pixel_from_rgba(uint32_t * pixel,
 			uint16_t red,
 			uint16_t green,
@@ -566,11 +566,11 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 
 	if (PICT_FORMAT_TYPE(format) == PICT_TYPE_A) {
 		*pixel = alpha >> (16 - abits);
-		return TRUE;
+		return true;
 	}
 
 	if (!PICT_FORMAT_COLOR(format))
-		return FALSE;
+		return false;
 
 	if (PICT_FORMAT_TYPE(format) == PICT_TYPE_ARGB) {
 		bshift = 0;
@@ -588,7 +588,7 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 		gshift = rshift + rbits;
 		bshift = gshift + gbits;
 	} else
-		return FALSE;
+		return false;
 
 	*pixel = 0;
 	*pixel |= (blue  >> (16 - bbits)) << bshift;
@@ -596,7 +596,7 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 	*pixel |= (red   >> (16 - rbits)) << rshift;
 	*pixel |= (alpha >> (16 - abits)) << ashift;
 
-	return TRUE;
+	return true;
 }
 
 uint32_t
@@ -638,43 +638,43 @@ get_solid_color(PicturePtr picture, uint32_t format)
 		return color_convert(get_pixel(picture), picture->format, format);
 }
 
-static Bool
+static bool
 is_solid(PicturePtr picture)
 {
 	if (picture->pSourcePict) {
 		if (picture->pSourcePict->type == SourcePictTypeSolidFill)
-			return TRUE;
+			return true;
 	}
 
 	if (picture->pDrawable) {
 		if (picture->pDrawable->width  == 1 &&
 		    picture->pDrawable->height == 1 &&
 		    picture->repeat)
-			return TRUE;
+			return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
-Bool
+bool
 sna_picture_is_solid(PicturePtr picture, uint32_t *color)
 {
 	if (!is_solid(picture))
-		return FALSE;
+		return false;
 
 	if (color)
 		*color = get_solid_color(picture, PICT_a8r8g8b8);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 pixel_is_opaque(uint32_t pixel, uint32_t format)
 {
 	unsigned int abits;
 
 	abits = PICT_FORMAT_A(format);
 	if (!abits)
-		return TRUE;
+		return true;
 
 	if (PICT_FORMAT_TYPE(format) == PICT_TYPE_A ||
 	    PICT_FORMAT_TYPE(format) == PICT_TYPE_BGRA) {
@@ -684,10 +684,10 @@ pixel_is_opaque(uint32_t pixel, uint32_t format)
 		unsigned int ashift = PICT_FORMAT_BPP(format) - abits;
 		return (pixel >> ashift) == (unsigned)((1 << abits) - 1);
 	} else
-		return FALSE;
+		return false;
 }
 
-static Bool
+static bool
 pixel_is_white(uint32_t pixel, uint32_t format)
 {
 	switch (PICT_FORMAT_TYPE(format)) {
@@ -697,11 +697,11 @@ pixel_is_white(uint32_t pixel, uint32_t format)
 	case PICT_TYPE_BGRA:
 		return pixel == ((1U << PICT_FORMAT_BPP(format)) - 1);
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool
+static bool
 is_opaque_solid(PicturePtr picture)
 {
 	if (picture->pSourcePict) {
@@ -711,7 +711,7 @@ is_opaque_solid(PicturePtr picture)
 		return pixel_is_opaque(get_pixel(picture), picture->format);
 }
 
-static Bool
+static bool
 is_white(PicturePtr picture)
 {
 	if (picture->pSourcePict) {
@@ -727,7 +727,7 @@ sna_composite_mask_is_opaque(PicturePtr mask)
 	if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format))
 		return is_solid(mask) && is_white(mask);
 	else if (!PICT_FORMAT_A(mask->format))
-		return TRUE;
+		return true;
 	else
 		return is_solid(mask) && is_opaque_solid(mask);
 }
@@ -900,7 +900,20 @@ static void blt_composite_nop_boxes(struct sna *sna,
 {
 }
 
-static Bool
+static bool
+begin_blt(struct sna *sna,
+	  struct sna_composite_op *op)
+{
+	if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo)) {
+		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo_fenced(&sna->kgem, op->dst.bo));
+		_kgem_set_mode(&sna->kgem, KGEM_BLT);
+	}
+
+	return true;
+}
+
+static bool
 prepare_blt_nop(struct sna *sna,
 		struct sna_composite_op *op)
 {
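
The new begin_blt() helper above centralises the aperture check that used to run in sna_blt_composite() before any blit path had been chosen (see the block removed from that function further down): as these hunks read, each prepare_blt_clear/fill/copy/put path now completes its own setup first and only then makes sure the destination bo fits the current batch, submitting and re-entering BLT mode if it does not. A standalone toy of that reordering, with stubbed names rather than the kgem API:

#include <stdbool.h>
#include <stdio.h>

static bool room_in_batch;

static void submit_batch(void) { room_in_batch = true; puts("batch submitted"); }
static void set_mode_blt(void) { puts("BLT mode"); }

/* analogue of begin_blt(): runs only once a BLT path has been chosen */
static bool begin_op(void)
{
	if (!room_in_batch) {
		submit_batch();
		set_mode_blt();
	}
	return true;
}

/* analogue of prepare_blt_fill(): path setup first, batch check last */
static bool prepare_fill(bool setup_ok)
{
	if (!setup_ok)
		return false;	/* nothing was flushed for a path never taken */
	return begin_op();
}

int main(void)
{
	printf("unsupported fill: %d\n", prepare_fill(false));
	printf("supported fill:   %d\n", prepare_fill(true));
	return 0;
}
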
@@ -910,10 +923,10 @@ prepare_blt_nop(struct sna *sna,
 	op->box   = blt_composite_nop_box;
 	op->boxes = blt_composite_nop_boxes;
 	op->done  = nop_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 prepare_blt_clear(struct sna *sna,
 		  struct sna_composite_op *op)
 {
@@ -929,10 +942,13 @@ prepare_blt_clear(struct sna *sna,
 	}
 	op->done = nop_done;
 
-	return sna_blt_fill_init(sna, &op->u.blt,
+	if (!sna_blt_fill_init(sna, &op->u.blt,
 				 op->dst.bo,
 				 op->dst.pixmap->drawable.bitsPerPixel,
-				 GXclear, 0);
+				 GXclear, 0))
+		return false;
+
+	return begin_blt(sna, op);
 }
 
 static bool
@@ -952,10 +968,13 @@ prepare_blt_fill(struct sna *sna,
 	}
 	op->done = nop_done;
 
-	return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
-				 op->dst.pixmap->drawable.bitsPerPixel,
-				 GXcopy,
-				 get_solid_color(source, op->dst.format));
+	if (!sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
+			       op->dst.pixmap->drawable.bitsPerPixel,
+			       GXcopy,
+			       get_solid_color(source, op->dst.format)))
+		return false;
+
+	return begin_blt(sna, op);
 }
 
 fastcall static void
@@ -1116,7 +1135,7 @@ blt_composite_copy_boxes_with_alpha(struct sna *sna,
 	} while(--nbox);
 }
 
-static Bool
+static bool
 prepare_blt_copy(struct sna *sna,
 		 struct sna_composite_op *op,
 		 uint32_t alpha_fixup)
@@ -1126,7 +1145,7 @@ prepare_blt_copy(struct sna *sna,
 
 	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
 		DBG(("%s: fallback -- can't blt from source\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
@@ -1134,7 +1153,7 @@ prepare_blt_copy(struct sna *sna,
 		if (!kgem_check_many_bo_fenced(&sna->kgem,
 					       op->dst.bo, priv->gpu_bo, NULL)) {
 			DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
@@ -1151,22 +1170,26 @@ prepare_blt_copy(struct sna *sna,
 		op->box   = blt_composite_copy_box_with_alpha;
 		op->boxes = blt_composite_copy_boxes_with_alpha;
 
-		return sna_blt_alpha_fixup_init(sna, &op->u.blt,
-						priv->gpu_bo,
-						op->dst.bo,
-						src->drawable.bitsPerPixel,
-						alpha_fixup);
+		if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
+					      priv->gpu_bo,
+					      op->dst.bo,
+					      src->drawable.bitsPerPixel,
+					      alpha_fixup))
+			return false;
 	} else {
 		op->blt   = blt_composite_copy;
 		op->box   = blt_composite_copy_box;
 		op->boxes = blt_composite_copy_boxes;
 
-		return sna_blt_copy_init(sna, &op->u.blt,
-					 priv->gpu_bo,
-					 op->dst.bo,
-					 src->drawable.bitsPerPixel,
-					 GXcopy);
+		if (!sna_blt_copy_init(sna, &op->u.blt,
+				       priv->gpu_bo,
+				       op->dst.bo,
+				       src->drawable.bitsPerPixel,
+				       GXcopy))
+			return false;
 	}
+
+	return begin_blt(sna, op);
 }
 
 fastcall static void
@@ -1405,7 +1428,7 @@ blt_put_composite_boxes_with_alpha(struct sna *sna,
 	}
 }
 
-static Bool
+static bool
 prepare_blt_put(struct sna *sna,
 		struct sna_composite_op *op,
 		uint32_t alpha_fixup)
@@ -1428,29 +1451,33 @@ prepare_blt_put(struct sna *sna,
 			op->box   = blt_composite_copy_box_with_alpha;
 			op->boxes = blt_composite_copy_boxes_with_alpha;
 
-			return sna_blt_alpha_fixup_init(sna, &op->u.blt,
-							src_bo, op->dst.bo,
-							op->dst.pixmap->drawable.bitsPerPixel,
-							alpha_fixup);
+			if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
+						      src_bo, op->dst.bo,
+						      op->dst.pixmap->drawable.bitsPerPixel,
+						      alpha_fixup))
+				return false;
 		} else {
 			op->blt   = blt_composite_copy;
 			op->box   = blt_composite_copy_box;
 			op->boxes = blt_composite_copy_boxes;
 
-			return sna_blt_copy_init(sna, &op->u.blt,
-						 src_bo, op->dst.bo,
-						 op->dst.pixmap->drawable.bitsPerPixel,
-						 GXcopy);
+			if (!sna_blt_copy_init(sna, &op->u.blt,
+					       src_bo, op->dst.bo,
+					       op->dst.pixmap->drawable.bitsPerPixel,
+					       GXcopy))
+				return false;
 		}
+
+		return begin_blt(sna, op);
 	} else {
 		if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
-			return FALSE;
+			return false;
 
 		assert(src->devKind);
 		assert(src->devPrivate.ptr);
 
 		if (alpha_fixup)
-			return FALSE; /* XXX */
+			return false; /* XXX */
 
 		if (alpha_fixup) {
 			op->u.blt.pixel = alpha_fixup;
@@ -1464,24 +1491,24 @@ prepare_blt_put(struct sna *sna,
 		}
 	}
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 has_gpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	BoxRec area;
 
 	if (!priv)
-		return FALSE;
+		return false;
 	if (!priv->gpu_bo)
-		return FALSE;
+		return false;
 
 	if (priv->cpu_damage == NULL)
-		return TRUE;
+		return true;
 	if (priv->cpu_damage->mode == DAMAGE_ALL)
-		return FALSE;
+		return false;
 
 	area.x1 = x;
 	area.y1 = y;
@@ -1489,24 +1516,24 @@ has_gpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 	area.y2 = y + h;
 	if (priv->gpu_damage &&
 	    sna_damage_contains_box__no_reduce(priv->gpu_damage, &area))
-		return TRUE;
+		return true;
 
 	return sna_damage_contains_box(priv->cpu_damage,
 				       &area) == PIXMAN_REGION_OUT;
 }
 
-static Bool
+static bool
 has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	BoxRec area;
 
 	if (!priv)
-		return TRUE;
+		return true;
 	if (priv->gpu_damage == NULL)
-		return TRUE;
+		return true;
 	if (priv->gpu_damage->mode == DAMAGE_ALL)
-		return FALSE;
+		return false;
 
 	area.x1 = x;
 	area.y1 = y;
@@ -1514,7 +1541,7 @@ has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 	area.y2 = y + h;
 	if (priv->cpu_damage &&
 	    sna_damage_contains_box__no_reduce(priv->cpu_damage, &area))
-		return TRUE;
+		return true;
 
 	return sna_damage_contains_box(priv->gpu_damage,
 				       &area) == PIXMAN_REGION_OUT;
@@ -1552,7 +1579,7 @@ reduce_damage(struct sna_composite_op *op,
 				      PICT_FORMAT_G(format),		\
 				      PICT_FORMAT_B(format))
 
-Bool
+bool
 sna_blt_composite(struct sna *sna,
 		  uint32_t op,
 		  PicturePtr src,
@@ -1568,10 +1595,10 @@ sna_blt_composite(struct sna *sna,
 	int16_t tx, ty;
 	uint32_t alpha_fixup;
 	bool was_clear;
-	Bool ret;
+	bool ret;
 
 #if DEBUG_NO_BLT || NO_BLT_COMPOSITE
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s (%d, %d), (%d, %d), %dx%d\n",
@@ -1585,7 +1612,7 @@ sna_blt_composite(struct sna *sna,
 	default:
 		DBG(("%s: unhandled bpp: %d\n", __FUNCTION__,
 		     dst->pDrawable->bitsPerPixel));
-		return FALSE;
+		return false;
 	}
 
 	was_clear = sna_drawable_is_clear(dst->pDrawable);
@@ -1593,12 +1620,12 @@ sna_blt_composite(struct sna *sna,
 	priv = sna_pixmap_move_to_gpu(tmp->dst.pixmap, MOVE_WRITE | MOVE_READ);
 	if (priv == NULL) {
 		DBG(("%s: dst not attached\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
 		DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
 		     __FUNCTION__, priv->gpu_bo->tiling, priv->gpu_bo->pitch));
-		return FALSE;
+		return false;
 	}
 
 	tmp->dst.format = dst->format;
@@ -1613,12 +1640,6 @@ sna_blt_composite(struct sna *sna,
 	if (width && height)
 		reduce_damage(tmp, dst_x, dst_y, width, height);
 
-	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo)) {
-		_kgem_submit(&sna->kgem);
-		assert(kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo));
-		_kgem_set_mode(&sna->kgem, KGEM_BLT);
-	}
-
 	if (op == PictOpClear) {
 clear:
 		if (was_clear)
@@ -1640,7 +1661,7 @@ clear:
 		if (op != PictOpSrc) {
 			DBG(("%s: unsuported op [%d] for blitting\n",
 			     __FUNCTION__, op));
-			return FALSE;
+			return false;
 		}
 
 		return prepare_blt_fill(sna, tmp, src);
@@ -1649,13 +1670,13 @@ clear:
 	if (!src->pDrawable) {
 		DBG(("%s: unsuported procedural source\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src->filter == PictFilterConvolution) {
 		DBG(("%s: convolutions filters not handled\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (op == PictOpOver && PICT_FORMAT_A(src_format) == 0)
@@ -1664,25 +1685,25 @@ clear:
 	if (op != PictOpSrc) {
 		DBG(("%s: unsuported op [%d] for blitting\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (!sna_transform_is_integer_translation(src->transform, &tx, &ty)) {
 		DBG(("%s: source transform is not an integer translation\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	x += tx;
 	y += ty;
 
-	if ((x > src->pDrawable->width ||
-	     y > src->pDrawable->height ||
-	     x + width < 0 ||
-	     y + height < 0) &&
+	if ((x >= src->pDrawable->width ||
+	     y >= src->pDrawable->height ||
+	     x + width <= 0 ||
+	     y + height <= 0) &&
 	    (!src->repeat || src->repeatType == RepeatNone)) {
 		DBG(("%s: source is outside of valid area, converting to clear\n",
 		     __FUNCTION__));
-		return prepare_blt_clear(sna, tmp);
+		goto clear;
 	}
 
 	alpha_fixup = 0;
@@ -1694,7 +1715,7 @@ clear:
 				       dst->format)))) {
 		DBG(("%s: incompatible src/dst formats src=%08x, dst=%08x\n",
 		     __FUNCTION__, (unsigned)src_format, dst->format));
-		return FALSE;
+		return false;
 	}
 
 	/* XXX tiling? fixup extend none? */
@@ -1704,7 +1725,7 @@ clear:
 		DBG(("%s: source extends outside (%d, %d), (%d, %d) of valid drawable %dx%d\n",
 		     __FUNCTION__,
 		     x, y, x+width, y+width, src->pDrawable->width, src->pDrawable->height));
-		return FALSE;
+		return false;
 	}
 
 	src_pixmap = get_drawable_pixmap(src->pDrawable);
@@ -1717,7 +1738,7 @@ clear:
 		DBG(("%s: source extends outside (%d, %d), (%d, %d) of valid pixmap %dx%d\n",
 		     __FUNCTION__,
 		     x, y, x+width, y+width, src_pixmap->drawable.width, src_pixmap->drawable.height));
-		return FALSE;
+		return false;
 	}
 
 	tmp->u.blt.src_pixmap = src_pixmap;
@@ -1773,7 +1794,7 @@ bool sna_blt_fill(struct sna *sna, uint8_t alu,
 		  struct sna_fill_op *fill)
 {
 #if DEBUG_NO_BLT || NO_BLT_FILL
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s(alu=%d, pixel=%x, bpp=%d)\n", __FUNCTION__, alu, pixel, bpp));
@@ -1781,18 +1802,18 @@ bool sna_blt_fill(struct sna *sna, uint8_t alu,
 	if (!kgem_bo_can_blt(&sna->kgem, bo)) {
 		DBG(("%s: rejected due to incompatible Y-tiling\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!sna_blt_fill_init(sna, &fill->base.u.blt,
 			       bo, bpp, alu, pixel))
-		return FALSE;
+		return false;
 
 	fill->blt   = sna_blt_fill_op_blt;
 	fill->box   = sna_blt_fill_op_box;
 	fill->boxes = sna_blt_fill_op_boxes;
 	fill->done  = sna_blt_fill_op_done;
-	return TRUE;
+	return true;
 }
 
 static void sna_blt_copy_op_blt(struct sna *sna,
@@ -1825,26 +1846,26 @@ bool sna_blt_copy(struct sna *sna, uint8_t alu,
 		  struct sna_copy_op *op)
 {
 #if DEBUG_NO_BLT || NO_BLT_COPY
-	return FALSE;
+	return false;
 #endif
 
 	if (!kgem_bo_can_blt(&sna->kgem, src))
-		return FALSE;
+		return false;
 
 	if (!kgem_bo_can_blt(&sna->kgem, dst))
-		return FALSE;
+		return false;
 
 	if (!sna_blt_copy_init(sna, &op->base.u.blt,
 			       src, dst,
 			       bpp, alu))
-		return FALSE;
+		return false;
 
 	op->blt  = sna_blt_copy_op_blt;
 	if (sna->kgem.gen >= 60)
 		op->done = gen6_blt_copy_op_done;
 	else
 		op->done = sna_blt_copy_op_done;
-	return TRUE;
+	return true;
 }
 
 static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
@@ -1945,7 +1966,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	return true;
 }
 
-Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *bo, int bpp,
 			uint32_t pixel,
 			const BoxRec *box, int nbox)
@@ -1954,7 +1975,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	uint32_t br13, cmd;
 
 #if DEBUG_NO_BLT || NO_BLT_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s (%d, %08x, %d) x %d\n",
@@ -1962,7 +1983,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 
 	if (!kgem_bo_can_blt(kgem, bo)) {
 		DBG(("%s: fallback -- dst uses Y-tiling\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (alu == GXclear)
@@ -1975,7 +1996,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	if (nbox == 1 && sna_blt_fill_box(sna, alu, bo, bpp, pixel, box))
-		return TRUE;
+		return true;
 
 	br13 = bo->pitch;
 	cmd = XY_SCANLINE_BLT;
@@ -2090,10 +2111,10 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 		}
 	} while (nbox);
 
-	return TRUE;
+	return true;
 }
 
-Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			int bpp, const BoxRec *box, int nbox)
@@ -2102,7 +2123,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	unsigned src_pitch, br13, cmd;
 
 #if DEBUG_NO_BLT || NO_BLT_COPY_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s src=(%d, %d) -> (%d, %d) x %d, tiling=(%d, %d), pitch=(%d, %d)\n",
@@ -2115,7 +2136,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		     __FUNCTION__,
 		     kgem_bo_can_blt(kgem, src_bo),
 		     kgem_bo_can_blt(kgem, dst_bo)));
-		return FALSE;
+		return false;
 	}
 
 	cmd = XY_SRC_COPY_BLT_CMD;
@@ -2227,7 +2248,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		kgem->nbatch += 3;
 	}
 
-	return TRUE;
+	return true;
 }
 
 static void box_extents(const BoxRec *box, int n, BoxRec *extents)
@@ -2247,20 +2268,20 @@ static void box_extents(const BoxRec *box, int n, BoxRec *extents)
 	}
 }
 
-Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				 PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 				 PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 				 const BoxRec *box, int nbox)
 {
 	struct kgem_bo *free_bo = NULL;
-	Bool ret;
+	bool ret;
 
 	DBG(("%s: alu=%d, n=%d\n", __FUNCTION__, alu, nbox));
 
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) {
 		DBG(("%s: mismatching depths %d -> %d\n",
 		     __FUNCTION__, src->drawable.depth, dst->drawable.depth));
-		return FALSE;
+		return false;
 	}
 
 	if (src_bo == dst_bo) {
@@ -2289,7 +2310,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				if (free_bo == NULL) {
 					DBG(("%s: fallback -- temp allocation failed\n",
 					     __FUNCTION__));
-					return FALSE;
+					return false;
 				}
 
 				if (!sna_blt_copy_boxes(sna, GXcopy,
@@ -2300,7 +2321,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 					DBG(("%s: fallback -- temp copy failed\n",
 					     __FUNCTION__));
 					kgem_bo_destroy(&sna->kgem, free_bo);
-					return FALSE;
+					return false;
 				}
 
 				src_dx = -extents.x1;
@@ -2318,7 +2339,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 			if (src_bo == NULL) {
 				DBG(("%s: fallback -- src y-tiling conversion failed\n",
 				     __FUNCTION__));
-				return FALSE;
+				return false;
 			}
 		}
 
@@ -2330,7 +2351,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 			if (dst_bo == NULL) {
 				DBG(("%s: fallback -- dst y-tiling conversion failed\n",
 				     __FUNCTION__));
-				return FALSE;
+				return false;
 			}
 		}
 	}
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 2fcc0d9..60179c4 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -41,7 +41,7 @@
 
 #define BOUND(v)	(INT16) ((v) < MINSHORT ? MINSHORT : (v) > MAXSHORT ? MAXSHORT : (v))
 
-Bool sna_composite_create(struct sna *sna)
+bool sna_composite_create(struct sna *sna)
 {
 	xRenderColor color ={ 0 };
 	int error;
@@ -101,7 +101,7 @@ clip_to_dst(pixman_region16_t *region,
 			return FALSE;
 		}
 
-		return TRUE;
+		return true;
 	} else if (region_is_empty(clip)) {
 		return FALSE;
 	} else {
@@ -116,13 +116,13 @@ clip_to_dst(pixman_region16_t *region,
 	}
 }
 
-static inline Bool
+static inline bool
 clip_to_src(RegionPtr region, PicturePtr p, int dx, int	 dy)
 {
-	Bool result;
+	bool result;
 
 	if (p->clientClipType == CT_NONE)
-		return TRUE;
+		return true;
 
 	pixman_region_translate(p->clientClip,
 				p->clipOrigin.x + dx,
@@ -137,7 +137,7 @@ clip_to_src(RegionPtr region, PicturePtr p, int dx, int	 dy)
 	return result && !region_is_empty(region);
 }
 
-Bool
+bool
 sna_compute_composite_region(RegionPtr region,
 			     PicturePtr src, PicturePtr mask, PicturePtr dst,
 			     INT16 src_x,  INT16 src_y,
@@ -302,7 +302,7 @@ trim_source_extents(BoxPtr extents, const PicturePtr p, int dx, int dy)
 	     extents->x2, extents->y2));
 }
 
-Bool
+bool
 sna_compute_composite_extents(BoxPtr extents,
 			      PicturePtr src, PicturePtr mask, PicturePtr dst,
 			      INT16 src_x,  INT16 src_y,
@@ -604,7 +604,7 @@ static int16_t bound(int16_t a, uint16_t b)
 	return v;
 }
 
-static Bool
+static bool
 _pixman_region_init_clipped_rectangles(pixman_region16_t *region,
 				       unsigned int num_rects,
 				       xRectangle *rects,
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 4bd4b9b..7d78372 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -1310,23 +1310,23 @@ bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage,
 						(BoxPtr)box) == PIXMAN_REGION_IN;
 }
 
-static Bool __sna_damage_intersect(struct sna_damage *damage,
+static bool __sna_damage_intersect(struct sna_damage *damage,
 				   RegionPtr region, RegionPtr result)
 {
 	assert(damage && damage->mode != DAMAGE_ALL);
 	if (region->extents.x2 <= damage->extents.x1 ||
 	    region->extents.x1 >= damage->extents.x2)
-		return FALSE;
+		return false;
 
 	if (region->extents.y2 <= damage->extents.y1 ||
 	    region->extents.y1 >= damage->extents.y2)
-		return FALSE;
+		return false;
 
 	if (damage->dirty)
 		__sna_damage_reduce(damage);
 
 	if (!pixman_region_not_empty(&damage->region))
-		return FALSE;
+		return false;
 
 	RegionNull(result);
 	RegionIntersect(result, &damage->region, region);
@@ -1335,12 +1335,12 @@ static Bool __sna_damage_intersect(struct sna_damage *damage,
 }
 
 #if HAS_DEBUG_FULL
-Bool _sna_damage_intersect(struct sna_damage *damage,
+bool _sna_damage_intersect(struct sna_damage *damage,
 			   RegionPtr region, RegionPtr result)
 {
 	char damage_buf[1000];
 	char region_buf[120];
-	Bool ret;
+	bool ret;
 
 	ErrorF("%s(%s, %s)...\n", __FUNCTION__,
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage),
@@ -1356,7 +1356,7 @@ Bool _sna_damage_intersect(struct sna_damage *damage,
 	return ret;
 }
 #else
-Bool _sna_damage_intersect(struct sna_damage *damage,
+bool _sna_damage_intersect(struct sna_damage *damage,
 			  RegionPtr region, RegionPtr result)
 {
 	return __sna_damage_intersect(damage, region, result);
@@ -1563,16 +1563,16 @@ static bool st_check_equal(struct sna_damage_selftest *test,
 	if (d_num != r_num) {
 		ErrorF("%s: damage and ref contain different number of rectangles\n",
 		       __FUNCTION__);
-		return FALSE;
+		return false;
 	}
 
 	if (memcmp(d_boxes, r_boxes, d_num*sizeof(BoxRec))) {
 		ErrorF("%s: damage and ref contain different rectangles\n",
 		       __FUNCTION__);
-		return FALSE;
+		return false;
 	}
 
-	return TRUE;
+	return true;
 }
 
 void sna_damage_selftest(void)
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index 21db3e3..5e800b7 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -178,10 +178,10 @@ static inline void sna_damage_subtract_boxes(struct sna_damage **damage,
 	assert(*damage == NULL || (*damage)->mode != DAMAGE_ALL);
 }
 
-Bool _sna_damage_intersect(struct sna_damage *damage,
+bool _sna_damage_intersect(struct sna_damage *damage,
 			  RegionPtr region, RegionPtr result);
 
-static inline Bool sna_damage_intersect(struct sna_damage *damage,
+static inline bool sna_damage_intersect(struct sna_damage *damage,
 					RegionPtr region, RegionPtr result)
 {
 	assert(damage);
@@ -197,13 +197,13 @@ sna_damage_overlaps_box(const struct sna_damage *damage,
 {
 	if (box->x2 <= damage->extents.x1 ||
 	    box->x1 >= damage->extents.x2)
-		return FALSE;
+		return false;
 
 	if (box->y2 <= damage->extents.y1 ||
 	    box->y1 >= damage->extents.y2)
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
 int _sna_damage_contains_box(struct sna_damage *damage,
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 9140caf..0928f6a 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -82,7 +82,7 @@ struct sna_output {
 	int num_props;
 	struct sna_property *props;
 
-	Bool has_panel_limits;
+	bool has_panel_limits;
 	int panel_hdisplay;
 	int panel_vdisplay;
 
@@ -506,7 +506,7 @@ sna_crtc_force_outputs_on(xf86CrtcPtr crtc)
 	}
 }
 
-static Bool
+static bool
 sna_crtc_apply(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
@@ -515,7 +515,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	struct drm_mode_crtc arg;
 	uint32_t output_ids[16];
 	int output_count = 0;
-	int i, ret = FALSE;
+	int i;
 
 	DBG(("%s\n", __FUNCTION__));
 	kgem_bo_submit(&sna->kgem, sna_crtc->bo);
@@ -560,12 +560,11 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	     sna_crtc->shadow ? " [shadow]" : "",
 	     output_count));
 
-	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
-	if (ret)
-		return FALSE;
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg))
+		return false;
 
 	sna_crtc_force_outputs_on(crtc);
-	return TRUE;
+	return true;
 }
 
 static bool sna_mode_enable_shadow(struct sna *sna)
@@ -1357,7 +1356,7 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	     __FUNCTION__, num, sna_crtc->id, sna_crtc->pipe));
 }
 
-static Bool
+static bool
 is_panel(int type)
 {
 	return (type == DRM_MODE_CONNECTOR_LVDS ||
@@ -1561,7 +1560,7 @@ sna_output_get_modes(xf86OutputPtr output)
 	 * the fullscreen experience.
 	 * If it is incorrect, please fix me.
 	 */
-	sna_output->has_panel_limits = FALSE;
+	sna_output->has_panel_limits = false;
 	if (is_panel(koutput->connector_type)) {
 		for (i = 0; i < koutput->count_modes; i++) {
 			drmModeModeInfo *mode_ptr;
@@ -1684,22 +1683,22 @@ sna_output_dpms_status(xf86OutputPtr output)
 	return sna_output->dpms_mode;
 }
 
-static Bool
+static bool
 sna_property_ignore(drmModePropertyPtr prop)
 {
 	if (!prop)
-		return TRUE;
+		return true;
 
 	/* ignore blob prop */
 	if (prop->flags & DRM_MODE_PROP_BLOB)
-		return TRUE;
+		return true;
 
 	/* ignore standard property */
 	if (!strcmp(prop->name, "EDID") ||
 	    !strcmp(prop->name, "DPMS"))
-		return TRUE;
+		return true;
 
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2015,7 +2014,7 @@ sna_zaphod_match(const char *s, const char *output)
 		s++;
 	} while (i < sizeof(t));
 
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2347,7 +2346,7 @@ static void set_size_range(struct sna *sna)
 	xf86CrtcSetSizeRange(sna->scrn, 320, 200, INT16_MAX, INT16_MAX);
 }
 
-Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
+bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 {
 	struct sna_mode *mode = &sna->mode;
 	int i;
@@ -2361,7 +2360,7 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 	if (!mode->kmode) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "failed to get resources: %s\n", strerror(errno));
-		return FALSE;
+		return false;
 	}
 
 	set_size_range(sna);
@@ -2374,7 +2373,7 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 
 	xf86InitialConfiguration(scrn, TRUE);
 
-	return TRUE;
+	return true;
 }
 
 void
@@ -2576,7 +2575,7 @@ sna_wait_for_scanline(struct sna *sna,
 		      xf86CrtcPtr crtc,
 		      const BoxRec *clip)
 {
-	Bool full_height;
+	bool full_height;
 	int y1, y2, pipe;
 
 	assert(crtc);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 405a7cd..6afeb51 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -864,7 +864,7 @@ sna_dri_frame_event_info_free(struct sna *sna,
 	free(info);
 }
 
-static Bool
+static bool
 sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 {
 	struct kgem_bo *bo = get_private(info->back)->bo;
@@ -873,7 +873,7 @@ sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 
 	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
-		return FALSE;
+		return false;
 
 	info->old_front.name = info->front->name;
 	info->old_front.bo = get_private(info->front)->bo;
@@ -882,10 +882,10 @@ sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 
 	info->front->name = info->back->name;
 	get_private(info->front)->bo = bo;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 can_flip(struct sna * sna,
 	 DrawablePtr draw,
 	 DRI2BufferPtr front,
@@ -895,22 +895,22 @@ can_flip(struct sna * sna,
 	PixmapPtr pixmap;
 
 	if (draw->type == DRAWABLE_PIXMAP)
-		return FALSE;
+		return false;
 
 	if (!sna->scrn->vtSema) {
 		DBG(("%s: no, not attached to VT\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (sna->flags & SNA_NO_FLIP) {
 		DBG(("%s: no, pageflips disabled\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (front->format != back->format) {
 		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
 		     __FUNCTION__, front->format, back->format));
-		return FALSE;
+		return false;
 	}
 
 	if (front->attachment != DRI2BufferFrontLeft) {
@@ -918,19 +918,19 @@ can_flip(struct sna * sna,
 		     __FUNCTION__,
 		     front->attachment,
 		     DRI2BufferFrontLeft));
-		return FALSE;
+		return false;
 	}
 
 	if (sna->mode.shadow_active) {
 		DBG(("%s: no, shadow enabled\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	pixmap = get_drawable_pixmap(draw);
 	if (pixmap != sna->front) {
 		DBG(("%s: no, window is not on the front buffer\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d)\n",
@@ -949,7 +949,7 @@ can_flip(struct sna * sna,
 		     draw->pScreen->root->winSize.extents.y1,
 		     draw->pScreen->root->winSize.extents.x2,
 		     draw->pScreen->root->winSize.extents.y2));
-		return FALSE;
+		return false;
 	}
 
 	if (draw->x != 0 || draw->y != 0 ||
@@ -964,7 +964,7 @@ can_flip(struct sna * sna,
 		     draw->width, draw->height,
 		     pixmap->drawable.width,
 		     pixmap->drawable.height));
-		return FALSE;
+		return false;
 	}
 
 	/* prevent an implicit tiling mode change */
@@ -973,13 +973,13 @@ can_flip(struct sna * sna,
 		     __FUNCTION__,
 		     get_private(front)->bo->tiling,
 		     get_private(back)->bo->tiling));
-		return FALSE;
+		return false;
 	}
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 can_exchange(struct sna * sna,
 	     DrawablePtr draw,
 	     DRI2BufferPtr front,
@@ -989,19 +989,19 @@ can_exchange(struct sna * sna,
 	PixmapPtr pixmap;
 
 	if (draw->type == DRAWABLE_PIXMAP)
-		return TRUE;
+		return true;
 
 	if (front->format != back->format) {
 		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
 		     __FUNCTION__, front->format, back->format));
-		return FALSE;
+		return false;
 	}
 
 	pixmap = get_window_pixmap(win);
 	if (pixmap == sna->front) {
 		DBG(("%s: no, window is attached to the front buffer\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (pixmap->drawable.width != win->drawable.width ||
@@ -1012,10 +1012,10 @@ can_exchange(struct sna * sna,
 		     win->drawable.height,
 		     pixmap->drawable.width,
 		     pixmap->drawable.height));
-		return FALSE;
+		return false;
 	}
 
-	return TRUE;
+	return true;
 }
 
 inline static uint32_t pipe_select(int pipe)
@@ -1234,7 +1234,7 @@ sna_dri_flip_continue(struct sna *sna,
 
 	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
-		return FALSE;
+		return false;
 
 	set_bo(sna->front, bo);
 
@@ -1251,7 +1251,7 @@ sna_dri_flip_continue(struct sna *sna,
 
 	sna->dri.flip_pending = info;
 
-	return TRUE;
+	return true;
 }
 
 static void sna_dri_flip_event(struct sna *sna,
@@ -1404,7 +1404,7 @@ sna_dri_page_flip_handler(struct sna *sna,
 	sna_dri_flip_event(sna, info);
 }
 
-static int
+static bool
 sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		      DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
 		      CARD64 remainder, DRI2SwapEventPtr func, void *data)
@@ -1425,7 +1425,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1)
-		return FALSE;
+		return false;
 
 	/* Truncate to match kernel interfaces; means occasional overflow
 	 * misses, but that's generally not a big deal */
@@ -1441,7 +1441,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			if (info->draw == draw) {
 				DBG(("%s: chaining flip\n", __FUNCTION__));
 				info->next_front.name = 1;
-				return TRUE;
+				return true;
 			} else {
 				/* We need to first wait (one vblank) for the
 				 * async flips to complete before this client
@@ -1455,7 +1455,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
 		if (info == NULL)
-			return FALSE;
+			return false;
 
 		info->type = type;
 
@@ -1474,7 +1474,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		if (!sna_dri_page_flip(sna, info)) {
 			DBG(("%s: failed to queue page flip\n", __FUNCTION__));
 			sna_dri_frame_event_info_free(sna, draw, info);
-			return FALSE;
+			return false;
 		}
 
 		if (type != DRI2_FLIP) {
@@ -1497,7 +1497,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	} else {
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
 		if (info == NULL)
-			return FALSE;
+			return false;
 
 		info->draw = draw;
 		info->client = client;
@@ -1517,7 +1517,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence = 0;
 		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(sna, draw, info);
-			return FALSE;
+			return false;
 		}
 
 		current_msc = vbl.reply.sequence;
@@ -1573,13 +1573,13 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.signal = (unsigned long)info;
 		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(sna, draw, info);
-			return FALSE;
+			return false;
 		}
 
 		info->frame = *target_msc;
 	}
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -2121,7 +2121,7 @@ out_complete:
 }
 #endif
 
-Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
+bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 {
 	DRI2InfoRec info;
 	int major = 1, minor = 0;
@@ -2134,7 +2134,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "cannot enable DRI2 whilst the GPU is wedged\n");
-		return FALSE;
+		return false;
 	}
 
 	if (xf86LoaderCheckSymbol("DRI2Version"))
@@ -2143,7 +2143,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	if (minor < 1) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "DRI2 requires DRI2 module version 1.1.0 or later\n");
-		return FALSE;
+		return false;
 	}
 
 	sna->deviceName = drmGetDeviceNameFromFd(sna->kgem.fd);
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index d9f6293..a364c11 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -85,12 +85,12 @@ sna_gradient_sample_width(PictGradient *gradient)
 	return min(width, 1024);
 }
 
-static Bool
+static bool
 _gradient_color_stops_equal(PictGradient *pattern,
 			    struct sna_gradient_cache *cache)
 {
     if (cache->nstops != pattern->nstops)
-	    return FALSE;
+	    return false;
 
     return memcmp(cache->stops,
 		  pattern->stops,
@@ -323,7 +323,7 @@ done:
 	return kgem_bo_reference(cache->bo[i]);
 }
 
-static Bool sna_alpha_cache_init(struct sna *sna)
+static bool sna_alpha_cache_init(struct sna *sna)
 {
 	struct sna_alpha_cache *cache = &sna->render.alpha_cache;
 	uint32_t color[256];
@@ -333,7 +333,7 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 
 	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(color), 0);
 	if (!cache->cache_bo)
-		return FALSE;
+		return false;
 
 	for (i = 0; i < 256; i++) {
 		color[i] = i << 24;
@@ -342,14 +342,14 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 						 sizeof(uint32_t)*i,
 						 sizeof(uint32_t));
 		if (cache->bo[i] == NULL)
-			return FALSE;
+			return false;
 
 		cache->bo[i]->pitch = 4;
 	}
 	return kgem_bo_write(&sna->kgem, cache->cache_bo, color, sizeof(color));
 }
 
-static Bool sna_solid_cache_init(struct sna *sna)
+static bool sna_solid_cache_init(struct sna *sna)
 {
 	struct sna_solid_cache *cache = &sna->render.solid_cache;
 
@@ -358,7 +358,7 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	cache->cache_bo =
 		kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
 	if (!cache->cache_bo)
-		return FALSE;
+		return false;
 
 	/*
 	 * Initialise [0] with white since it is very common and filling the
@@ -368,27 +368,27 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
 					 0, sizeof(uint32_t));
 	if (cache->bo[0] == NULL)
-		return FALSE;
+		return false;
 
 	cache->bo[0]->pitch = 4;
 	cache->dirty = 1;
 	cache->size = 1;
 	cache->last = 0;
 
-	return TRUE;
+	return true;
 }
 
-Bool sna_gradients_create(struct sna *sna)
+bool sna_gradients_create(struct sna *sna)
 {
 	DBG(("%s\n", __FUNCTION__));
 
 	if (!sna_alpha_cache_init(sna))
-		return FALSE;
+		return false;
 
 	if (!sna_solid_cache_init(sna))
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
 void sna_gradients_close(struct sna *sna)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 2baee4c..f1df84a 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -41,7 +41,7 @@
 
 /* XXX Need to avoid using GTT fenced access for I915_TILING_Y on 855GM */
 
-static Bool
+static bool
 box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 24922b3..546148d 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -70,7 +70,7 @@ sna_render_format_for_depth(int depth)
 	}
 }
 
-static Bool
+static bool
 no_render_composite(struct sna *sna,
 		    uint8_t op,
 		    PicturePtr src,
@@ -98,7 +98,7 @@ no_render_composite(struct sna *sna,
 	(void)mask_y;
 }
 
-static Bool
+static bool
 no_render_copy_boxes(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		     PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -116,7 +116,7 @@ no_render_copy_boxes(struct sna *sna, uint8_t alu,
 				  box, n);
 }
 
-static Bool
+static bool
 no_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -133,7 +133,7 @@ no_render_copy(struct sna *sna, uint8_t alu,
 	return FALSE;
 }
 
-static Bool
+static bool
 no_render_fill_boxes(struct sna *sna,
 		     CARD8 op,
 		     PictFormat format,
@@ -176,7 +176,7 @@ no_render_fill_boxes(struct sna *sna,
 				  pixel, box, n);
 }
 
-static Bool
+static bool
 no_render_fill(struct sna *sna, uint8_t alu,
 	       PixmapPtr dst, struct kgem_bo *dst_bo,
 	       uint32_t color,
@@ -189,7 +189,7 @@ no_render_fill(struct sna *sna, uint8_t alu,
 			    tmp);
 }
 
-static Bool
+static bool
 no_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		   uint32_t color,
 		   int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -209,7 +209,7 @@ no_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 no_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	DBG(("%s: pixmap=%ld %dx%d\n", __FUNCTION__,
@@ -1717,7 +1717,7 @@ sna_render_picture_convert(struct sna *sna,
 	return 1;
 }
 
-Bool
+bool
 sna_render_composite_redirect(struct sna *sna,
 			      struct sna_composite_op *op,
 			      int x, int y, int width, int height)
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index fae5872..b003e7b 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -192,7 +192,7 @@ struct sna_render {
 	int max_3d_size;
 	int max_3d_pitch;
 
-	Bool (*composite)(struct sna *sna, uint8_t op,
+	bool (*composite)(struct sna *sna, uint8_t op,
 			  PicturePtr dst, PicturePtr src, PicturePtr mask,
 			  int16_t src_x, int16_t src_y,
 			  int16_t msk_x, int16_t msk_y,
@@ -200,7 +200,7 @@ struct sna_render {
 			  int16_t w, int16_t h,
 			  struct sna_composite_op *tmp);
 
-	Bool (*composite_spans)(struct sna *sna, uint8_t op,
+	bool (*composite_spans)(struct sna *sna, uint8_t op,
 				PicturePtr dst, PicturePtr src,
 				int16_t src_x, int16_t src_y,
 				int16_t dst_x, int16_t dst_y,
@@ -210,7 +210,7 @@ struct sna_render {
 #define COMPOSITE_SPANS_RECTILINEAR 0x1
 #define COMPOSITE_SPANS_INPLACE_HINT 0x2
 
-	Bool (*video)(struct sna *sna,
+	bool (*video)(struct sna *sna,
 		      struct sna_video *video,
 		      struct sna_video_frame *frame,
 		      RegionPtr dstRegion,
@@ -218,29 +218,29 @@ struct sna_render {
 		      short drw_w, short drw_h,
 		      PixmapPtr pixmap);
 
-	Bool (*fill_boxes)(struct sna *sna,
+	bool (*fill_boxes)(struct sna *sna,
 			   CARD8 op,
 			   PictFormat format,
 			   const xRenderColor *color,
 			   PixmapPtr dst, struct kgem_bo *dst_bo,
 			   const BoxRec *box, int n);
-	Bool (*fill)(struct sna *sna, uint8_t alu,
+	bool (*fill)(struct sna *sna, uint8_t alu,
 		     PixmapPtr dst, struct kgem_bo *dst_bo,
 		     uint32_t color,
 		     struct sna_fill_op *tmp);
-	Bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
+	bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
 			 uint32_t color,
 			 int16_t x1, int16_t y1, int16_t x2, int16_t y2,
 			 uint8_t alu);
-	Bool (*clear)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo);
+	bool (*clear)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo);
 
-	Bool (*copy_boxes)(struct sna *sna, uint8_t alu,
+	bool (*copy_boxes)(struct sna *sna, uint8_t alu,
 			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			   const BoxRec *box, int n, unsigned flags);
 #define COPY_LAST 0x1
 
-	Bool (*copy)(struct sna *sna, uint8_t alu,
+	bool (*copy)(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo,
 		     PixmapPtr dst, struct kgem_bo *dst_bo,
 		     struct sna_copy_op *op);
@@ -295,7 +295,7 @@ struct sna_render {
 
 struct gen2_render_state {
 	uint32_t target;
-	Bool need_invariant;
+	bool need_invariant;
 	uint32_t logic_op_enabled;
 	uint32_t ls1, ls2, vft;
 	uint32_t diffuse;
@@ -305,7 +305,7 @@ struct gen2_render_state {
 
 struct gen3_render_state {
 	uint32_t current_dst;
-	Bool need_invariant;
+	bool need_invariant;
 	uint32_t tex_count;
 	uint32_t last_drawrect_limit;
 	uint32_t last_target;
@@ -344,8 +344,8 @@ struct gen4_render_state {
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
 
-	Bool needs_invariant;
-	Bool needs_urb;
+	bool needs_invariant;
+	bool needs_urb;
 };
 
 struct gen5_render_state {
@@ -366,7 +366,7 @@ struct gen5_render_state {
 	uint16_t surface_table;
 	uint16_t last_pipelined_pointers;
 
-	Bool needs_invariant;
+	bool needs_invariant;
 };
 
 enum {
@@ -413,8 +413,8 @@ struct gen6_render_state {
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
 
-	Bool needs_invariant;
-	Bool first_state_packet;
+	bool needs_invariant;
+	bool first_state_packet;
 };
 
 enum {
@@ -462,8 +462,8 @@ struct gen7_render_state {
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
 
-	Bool needs_invariant;
-	Bool emit_flush;
+	bool needs_invariant;
+	bool emit_flush;
 };
 
 struct sna_static_stream {
@@ -494,24 +494,24 @@ sna_render_get_gradient(struct sna *sna,
 
 uint32_t sna_rgba_for_color(uint32_t color, int depth);
 uint32_t sna_rgba_to_color(uint32_t rgba, uint32_t format);
-Bool sna_get_rgba_from_pixel(uint32_t pixel,
+bool sna_get_rgba_from_pixel(uint32_t pixel,
 			     uint16_t *red,
 			     uint16_t *green,
 			     uint16_t *blue,
 			     uint16_t *alpha,
 			     uint32_t format);
-Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
+bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
 
 void no_render_init(struct sna *sna);
 
-Bool gen2_render_init(struct sna *sna);
-Bool gen3_render_init(struct sna *sna);
-Bool gen4_render_init(struct sna *sna);
-Bool gen5_render_init(struct sna *sna);
-Bool gen6_render_init(struct sna *sna);
-Bool gen7_render_init(struct sna *sna);
+bool gen2_render_init(struct sna *sna);
+bool gen3_render_init(struct sna *sna);
+bool gen4_render_init(struct sna *sna);
+bool gen5_render_init(struct sna *sna);
+bool gen6_render_init(struct sna *sna);
+bool gen7_render_init(struct sna *sna);
 
-Bool sna_tiling_composite(uint32_t op,
+bool sna_tiling_composite(uint32_t op,
 			  PicturePtr src,
 			  PicturePtr mask,
 			  PicturePtr dst,
@@ -520,7 +520,7 @@ Bool sna_tiling_composite(uint32_t op,
 			  int16_t dst_x, int16_t dst_y,
 			  int16_t width, int16_t height,
 			  struct sna_composite_op *tmp);
-Bool sna_tiling_composite_spans(uint32_t op,
+bool sna_tiling_composite_spans(uint32_t op,
 				PicturePtr src,
 				PicturePtr dst,
 				int16_t src_x,  int16_t src_y,
@@ -528,24 +528,24 @@ Bool sna_tiling_composite_spans(uint32_t op,
 				int16_t width,  int16_t height,
 				unsigned flags,
 				struct sna_composite_spans_op *tmp);
-Bool sna_tiling_fill_boxes(struct sna *sna,
+bool sna_tiling_fill_boxes(struct sna *sna,
 			   CARD8 op,
 			   PictFormat format,
 			   const xRenderColor *color,
 			   PixmapPtr dst, struct kgem_bo *dst_bo,
 			   const BoxRec *box, int n);
 
-Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			   const BoxRec *box, int n);
 
-Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			       struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			       struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			       int bpp, const BoxRec *box, int nbox);
 
-Bool sna_blt_composite(struct sna *sna,
+bool sna_blt_composite(struct sna *sna,
 		       uint32_t op,
 		       PicturePtr src,
 		       PicturePtr dst,
@@ -566,30 +566,30 @@ bool sna_blt_copy(struct sna *sna, uint8_t alu,
 		  int bpp,
 		  struct sna_copy_op *copy);
 
-Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *bo,
 			int bpp,
 			uint32_t pixel,
 			const BoxRec *box, int n);
 
-Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			int bpp,
 			const BoxRec *box, int n);
-Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				 PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 				 PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 				 const BoxRec *box, int nbox);
 
-Bool _sna_get_pixel_from_rgba(uint32_t *pixel,
+bool _sna_get_pixel_from_rgba(uint32_t *pixel,
 			     uint16_t red,
 			     uint16_t green,
 			     uint16_t blue,
 			     uint16_t alpha,
 			     uint32_t format);
 
-static inline Bool
+static inline bool
 sna_get_pixel_from_rgba(uint32_t * pixel,
 			uint16_t red,
 			uint16_t green,
@@ -671,7 +671,7 @@ inline static void sna_render_composite_redirect_init(struct sna_composite_op *o
 	t->damage = NULL;
 }
 
-Bool
+bool
 sna_render_composite_redirect(struct sna *sna,
 			      struct sna_composite_op *op,
 			      int x, int y, int width, int height);
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 2210127..32eb54e 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -67,7 +67,7 @@ static inline void batch_emit_float(struct sna *sna, float f)
 	batch_emit(sna, u.dw);
 }
 
-static inline Bool
+static inline bool
 is_gpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
@@ -81,7 +81,7 @@ is_gpu(DrawablePtr drawable)
 	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
 }
 
-static inline Bool
+static inline bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
@@ -98,7 +98,7 @@ is_cpu(DrawablePtr drawable)
 	return true;
 }
 
-static inline Bool
+static inline bool
 is_dirty(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
@@ -119,36 +119,36 @@ too_small(struct sna_pixmap *priv)
 	return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
 }
 
-static inline Bool
+static inline bool
 unattached(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
 	return priv == NULL || (priv->gpu_damage == NULL && priv->cpu_damage);
 }
 
-static inline Bool
+static inline bool
 picture_is_gpu(PicturePtr picture)
 {
 	if (!picture || !picture->pDrawable)
-		return FALSE;
+		return false;
 	return is_gpu(picture->pDrawable);
 }
 
-static inline Bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
+static inline bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
 {
 	if (src->depth == dst->depth)
-		return TRUE;
+		return true;
 
 	/* Also allow for the alpha to be discarded on a copy */
 	if (src->bitsPerPixel != dst->bitsPerPixel)
-		return FALSE;
+		return false;
 
 	if (dst->depth == 24 && src->depth == 32)
-		return TRUE;
+		return true;
 
 	/* Note that a depth-16 pixmap is r5g6b5, not x1r5g5b5. */
 
-	return FALSE;
+	return false;
 }
 
 static inline struct kgem_bo *
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index fdc297a..e048361 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -265,7 +265,7 @@ done:
 	free(tile);
 }
 
-Bool
+bool
 sna_tiling_composite(uint32_t op,
 		     PicturePtr src,
 		     PicturePtr mask,
@@ -285,11 +285,11 @@ sna_tiling_composite(uint32_t op,
 
 	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
 	if (priv == NULL || priv->gpu_bo == NULL)
-		return FALSE;
+		return false;
 
 	tile = malloc(sizeof(*tile));
 	if (!tile)
-		return FALSE;
+		return false;
 
 	tile->op = op;
 
@@ -315,7 +315,7 @@ sna_tiling_composite(uint32_t op,
 	tmp->done  = sna_tiling_composite_done;
 
 	tmp->priv = tile;
-	return TRUE;
+	return true;
 }
 
 fastcall static void
@@ -522,7 +522,7 @@ done:
 	free(tile);
 }
 
-Bool
+bool
 sna_tiling_composite_spans(uint32_t op,
 			   PicturePtr src,
 			   PicturePtr dst,
@@ -541,11 +541,11 @@ sna_tiling_composite_spans(uint32_t op,
 
 	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
 	if (priv == NULL || priv->gpu_bo == NULL)
-		return FALSE;
+		return false;
 
 	tile = malloc(sizeof(*tile));
 	if (!tile)
-		return FALSE;
+		return false;
 
 	tile->op = op;
 	tile->flags = flags;
@@ -571,10 +571,10 @@ sna_tiling_composite_spans(uint32_t op,
 	tmp->done  = sna_tiling_composite_spans_done;
 
 	tmp->base.priv = tile;
-	return TRUE;
+	return true;
 }
 
-Bool
+bool
 sna_tiling_fill_boxes(struct sna *sna,
 		      CARD8 op,
 		      PictFormat format,
@@ -585,7 +585,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 	RegionRec region, tile, this;
 	struct kgem_bo *bo;
 	int step;
-	Bool ret = FALSE;
+	bool ret = false;
 
 	pixman_region_init_rects(&region, box, n);
 
@@ -674,7 +674,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 		}
 	}
 
-	ret = TRUE;
+	ret = true;
 	goto done;
 err:
 	kgem_bo_destroy(&sna->kgem, bo);
@@ -684,7 +684,7 @@ done:
 	return ret;
 }
 
-Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			       struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			       struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			       int bpp, const BoxRec *box, int nbox)
@@ -692,7 +692,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	RegionRec region, tile, this;
 	struct kgem_bo *bo;
 	int step;
-	Bool ret = FALSE;
+	bool ret = false;
 
 	if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
 	    !kgem_bo_can_blt(&sna->kgem, dst_bo)) {
@@ -701,7 +701,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		     __FUNCTION__,
 		     kgem_bo_can_blt(&sna->kgem, src_bo),
 		     kgem_bo_can_blt(&sna->kgem, dst_bo)));
-		return FALSE;
+		return false;
 	}
 
 	pixman_region_init_rects(&region, box, nbox);
@@ -773,7 +773,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		}
 	}
 
-	ret = TRUE;
+	ret = true;
 	goto done;
 err:
 	kgem_bo_destroy(&sna->kgem, bo);
@@ -783,7 +783,7 @@ done:
 	return ret;
 }
 
-static Bool
+static bool
 box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
@@ -798,7 +798,7 @@ box_intersect(BoxPtr a, const BoxRec *b)
 	return a->x1 < a->x2 && a->y1 < a->y2;
 }
 
-Bool
+bool
 sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 		      PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		      PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -807,7 +807,7 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 	BoxRec extents, tile, stack[64], *clipped, *c;
 	PixmapRec p;
 	int i, step, tiling;
-	Bool ret = FALSE;
+	bool ret = false;
 
 	extents = box[0];
 	for (i = 1; i < n; i++) {
@@ -905,7 +905,7 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 		}
 	}
 
-	ret = TRUE;
+	ret = true;
 tiled_error:
 	if (clipped != stack)
 		free(clipped);
diff --git a/src/sna/sna_transform.c b/src/sna/sna_transform.c
index 54852b1..55cc1ad 100644
--- a/src/sna/sna_transform.c
+++ b/src/sna/sna_transform.c
@@ -38,22 +38,22 @@
  *
  * transform may be null.
  */
-Bool sna_transform_is_affine(const PictTransform *t)
+bool sna_transform_is_affine(const PictTransform *t)
 {
 	if (t == NULL)
-		return TRUE;
+		return true;
 
 	return t->matrix[2][0] == 0 && t->matrix[2][1] == 0;
 }
 
-Bool
+bool
 sna_transform_is_translation(const PictTransform *t,
 			     pixman_fixed_t *tx,
 			     pixman_fixed_t *ty)
 {
 	if (t == NULL) {
 		*tx = *ty = 0;
-		return TRUE;
+		return true;
 	}
 
 	if (t->matrix[0][0] != IntToxFixed(1) ||
@@ -63,19 +63,19 @@ sna_transform_is_translation(const PictTransform *t,
 	    t->matrix[2][0] != 0 ||
 	    t->matrix[2][1] != 0 ||
 	    t->matrix[2][2] != IntToxFixed(1))
-		return FALSE;
+		return false;
 
 	*tx = t->matrix[0][2];
 	*ty = t->matrix[1][2];
-	return TRUE;
+	return true;
 }
 
-Bool
+bool
 sna_transform_is_integer_translation(const PictTransform *t, int16_t *tx, int16_t *ty)
 {
 	if (t == NULL) {
 		*tx = *ty = 0;
-		return TRUE;
+		return true;
 	}
 
 	if (t->matrix[0][0] != IntToxFixed(1) ||
@@ -85,15 +85,15 @@ sna_transform_is_integer_translation(const PictTransform *t, int16_t *tx, int16_
 	    t->matrix[2][0] != 0 ||
 	    t->matrix[2][1] != 0 ||
 	    t->matrix[2][2] != IntToxFixed(1))
-		return FALSE;
+		return false;
 
 	if (pixman_fixed_fraction(t->matrix[0][2]) ||
 	    pixman_fixed_fraction(t->matrix[1][2]))
-		return FALSE;
+		return false;
 
 	*tx = pixman_fixed_to_int(t->matrix[0][2]);
 	*ty = pixman_fixed_to_int(t->matrix[1][2]);
-	return TRUE;
+	return true;
 }
 
 /**
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 2341cb3..1553f58 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2221,9 +2221,9 @@ static int operator_is_bounded(uint8_t op)
 	case PictOpOver:
 	case PictOpOutReverse:
 	case PictOpAdd:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -2366,7 +2366,7 @@ trapezoids_inplace_fallback(CARD8 op,
 
 	image = NULL;
 	if (sna_drawable_move_to_cpu(dst->pDrawable, MOVE_READ | MOVE_WRITE))
-		image = image_from_pict(dst, FALSE, &dx, &dy);
+		image = image_from_pict(dst, false, &dx, &dy);
 	if (image) {
 		dx += dst->pDrawable->x;
 		dy += dst->pDrawable->y;
@@ -2529,7 +2529,7 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 }
 
-static Bool
+static bool
 composite_aligned_boxes(struct sna *sna,
 			CARD8 op,
 			PicturePtr src,
@@ -2542,7 +2542,7 @@ composite_aligned_boxes(struct sna *sna,
 	BoxRec stack_boxes[64], *boxes;
 	pixman_region16_t region, clip;
 	struct sna_composite_op tmp;
-	Bool ret = true;
+	bool ret = true;
 	int dx, dy, n, num_boxes;
 
 	if (NO_ALIGNED_BOXES)
@@ -3306,7 +3306,7 @@ pixman:
 			continue;
 		}
 
-		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
+		pi.image = image_from_pict(dst, false, &pi.dx, &pi.dy);
 		pi.source = pixman_image_create_bits(PIXMAN_a8r8g8b8, 1, 1, NULL, 0);
 		pixman_image_set_repeat(pi.source, PIXMAN_REPEAT_NORMAL);
 		pi.bits = pixman_image_get_data(pi.source);
@@ -3467,8 +3467,8 @@ composite_unaligned_boxes_inplace(CARD8 op,
 			}
 		}
 
-		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
-		pi.source = image_from_pict(src, FALSE, &pi.sx, &pi.sy);
+		pi.image = image_from_pict(dst, false, &pi.dx, &pi.dy);
+		pi.source = image_from_pict(src, false, &pi.sx, &pi.sy);
 		pi.sx += src_x;
 		pi.sy += src_y;
 		pi.mask = pixman_image_create_bits(PIXMAN_a8, 1, 1, NULL, 0);
@@ -4743,10 +4743,10 @@ unbounded_pass:
 
 		op = 0;
 	} else {
-		inplace.composite.dst = image_from_pict(dst, FALSE,
+		inplace.composite.dst = image_from_pict(dst, false,
 							&inplace.composite.dx,
 							&inplace.composite.dy);
-		inplace.composite.src = image_from_pict(src, FALSE,
+		inplace.composite.src = image_from_pict(src, false,
 							&inplace.composite.sx,
 							&inplace.composite.sy);
 		inplace.composite.sx +=
@@ -5879,7 +5879,7 @@ sna_add_traps(PicturePtr picture, INT16 x, INT16 y, int n, xTrap *t)
 		pixman_image_t *image;
 		int dx, dy;
 
-		if (!(image = image_from_pict(picture, FALSE, &dx, &dy)))
+		if (!(image = image_from_pict(picture, false, &dx, &dy)))
 			return;
 
 		pixman_add_traps(image, x + dx, y + dy, n, (pixman_trap_t *)t);
@@ -5910,9 +5910,9 @@ xTriangleValid(const xTriangle *t)
 
 	/* if the length of any edge is zero, the area must be zero */
 	if (v1.x == 0 && v1.y == 0)
-		return FALSE;
+		return false;
 	if (v2.x == 0 && v2.y == 0)
-		return FALSE;
+		return false;
 
 	/* if the cross-product is zero, so it the size */
 	return v2.y * v1.x != v1.y * v2.x;
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 71d1bbc..b76a3c4 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -66,11 +66,11 @@
 #define _SNA_XVMC_SERVER_
 #include "sna_video_hwmc.h"
 #else
-static inline Bool sna_video_xvmc_setup(struct sna *sna,
+static inline bool sna_video_xvmc_setup(struct sna *sna,
 					ScreenPtr ptr,
 					XF86VideoAdaptorPtr target)
 {
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -123,7 +123,7 @@ void sna_video_buffer_fini(struct sna *sna,
 	video->buf = bo;
 }
 
-Bool
+bool
 sna_video_clip_helper(ScrnInfoPtr scrn,
 		      struct sna_video *video,
 		      struct sna_video_frame *frame,
@@ -135,7 +135,7 @@ sna_video_clip_helper(ScrnInfoPtr scrn,
 		      short drw_w, short drw_h,
 		      RegionPtr reg)
 {
-	Bool ret;
+	bool ret;
 	RegionRec crtc_region_local;
 	RegionPtr crtc_region = reg;
 	INT32 x1, x2, y1, y2;
@@ -432,7 +432,7 @@ sna_copy_packed_data(struct sna_video *video,
 	}
 }
 
-Bool
+bool
 sna_video_copy_data(struct sna *sna,
 		    struct sna_video *video,
 		    struct sna_video_frame *frame,
@@ -465,7 +465,7 @@ sna_video_copy_data(struct sna *sna,
 								       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
 								       (void **)&dst);
 					if (frame->bo == NULL)
-						return FALSE;
+						return false;
 
 					memcpy(dst, buf,
 					       pitch[1]*frame->height +
@@ -477,7 +477,7 @@ sna_video_copy_data(struct sna *sna,
 					frame->VBufOffset = frame->UBufOffset;
 					frame->UBufOffset = tmp;
 				}
-				return TRUE;
+				return true;
 			}
 		} else {
 			if (frame->width*2 == frame->pitch[0]) {
@@ -490,13 +490,13 @@ sna_video_copy_data(struct sna *sna,
 								       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
 								       (void **)&dst);
 					if (frame->bo == NULL)
-						return FALSE;
+						return false;
 
 					memcpy(dst,
 					       buf + (frame->top * frame->width*2) + (frame->left << 1),
 					       frame->nlines*frame->width*2);
 				}
-				return TRUE;
+				return true;
 			}
 		}
 	}
@@ -505,13 +505,13 @@ sna_video_copy_data(struct sna *sna,
 	if (frame->bo) {
 		dst = kgem_bo_map__gtt(&sna->kgem, frame->bo);
 		if (dst == NULL)
-			return FALSE;
+			return false;
 	} else {
 		frame->bo = kgem_create_buffer(&sna->kgem, frame->size,
 					       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
 					       (void **)&dst);
 		if (frame->bo == NULL)
-			return FALSE;
+			return false;
 	}
 
 	if (is_planar_fourcc(frame->id))
@@ -519,7 +519,7 @@ sna_video_copy_data(struct sna *sna,
 	else
 		sna_copy_packed_data(video, frame, buf, dst);
 
-	return TRUE;
+	return true;
 }
 
 void sna_video_init(struct sna *sna, ScreenPtr screen)
@@ -528,7 +528,7 @@ void sna_video_init(struct sna *sna, ScreenPtr screen)
 	XF86VideoAdaptorPtr textured, overlay;
 	int num_adaptors;
 	int prefer_overlay =
-	    xf86ReturnOptValBool(sna->Options, OPTION_PREFER_OVERLAY, FALSE);
+	    xf86ReturnOptValBool(sna->Options, OPTION_PREFER_OVERLAY, false);
 
 	if (!xf86LoaderCheckSymbol("xf86XVListGenericAdaptors"))
 		return;
diff --git a/src/sna/sna_video.h b/src/sna/sna_video.h
index 7bfc971..3ce72c0 100644
--- a/src/sna/sna_video.h
+++ b/src/sna/sna_video.h
@@ -57,7 +57,7 @@ struct sna_video {
 	struct kgem_bo *old_buf[2];
 	struct kgem_bo *buf;
 
-	Bool textured;
+	bool textured;
 	Rotation rotation;
 	int plane;
 
@@ -100,7 +100,7 @@ static inline int is_planar_fourcc(int id)
 	}
 }
 
-Bool
+bool
 sna_video_clip_helper(ScrnInfoPtr scrn,
 		      struct sna_video *adaptor_priv,
 		      struct sna_video_frame *frame,
@@ -123,7 +123,7 @@ sna_video_buffer(struct sna *sna,
 		 struct sna_video *video,
 		 struct sna_video_frame *frame);
 
-Bool
+bool
 sna_video_copy_data(struct sna *sna,
 		    struct sna_video *video,
 		    struct sna_video_frame *frame,
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index 99f9ca5..068f234 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -100,7 +100,7 @@ static const XF86ImageRec Images[NUM_IMAGES] = {
 };
 
 /* kernel modesetting overlay functions */
-static Bool sna_has_overlay(struct sna *sna)
+static bool sna_has_overlay(struct sna *sna)
 {
 	struct drm_i915_getparam gp;
 	int has_overlay = 0;
@@ -113,7 +113,7 @@ static Bool sna_has_overlay(struct sna *sna)
 	return ret == 0 && has_overlay;
 }
 
-static Bool sna_video_overlay_update_attrs(struct sna *sna,
+static bool sna_video_overlay_update_attrs(struct sna *sna,
 					   struct sna_video *video)
 {
 	struct drm_intel_overlay_attrs attrs;
@@ -348,7 +348,7 @@ update_dst_box_to_crtc_coords(struct sna *sna, xf86CrtcPtr crtc, BoxPtr dstBox)
 	return;
 }
 
-static Bool
+static bool
 sna_video_overlay_show(struct sna *sna,
 		       struct sna_video *video,
 		       struct sna_video_frame *frame,
@@ -684,7 +684,7 @@ XF86VideoAdaptorPtr sna_video_overlay_setup(struct sna *sna,
 	adaptor->PutImage = sna_video_overlay_put_image;
 	adaptor->QueryImageAttributes = sna_video_overlay_query_video_attributes;
 
-	video->textured = FALSE;
+	video->textured = false;
 	video->color_key = sna_video_overlay_color_key(sna);
 	video->brightness = -19;	/* (255/219) * -16 */
 	video->contrast = 75;	/* 255/219 * 64 */
diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index d0a4808..87c5845 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -43,7 +43,7 @@
 #define IMAGE_MAX_WIDTH		2048
 #define IMAGE_MAX_HEIGHT	2048
 
-#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
+#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, true)
 
 static Atom xvColorKey;
 
@@ -85,7 +85,7 @@ static int sna_video_sprite_set_attr(ScrnInfoPtr scrn,
 	struct sna_video *video = data;
 
 	if (attribute == xvColorKey) {
-		video->color_key_changed = TRUE;
+		video->color_key_changed = true;
 		video->color_key = value;
 		DBG(("COLORKEY = %d\n", value));
 	} else
@@ -167,7 +167,7 @@ update_dst_box_to_crtc_coords(struct sna *sna, xf86CrtcPtr crtc, BoxPtr dstBox)
 	}
 }
 
-static Bool
+static bool
 sna_video_sprite_show(struct sna *sna,
 		      struct sna_video *video,
 		      struct sna_video_frame *frame,
@@ -199,7 +199,7 @@ sna_video_sprite_show(struct sna *sna,
 			xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
 				   "failed to update color key\n");
 
-		video->color_key_changed = FALSE;
+		video->color_key_changed = false;
 	}
 #endif
 
@@ -406,9 +406,9 @@ XF86VideoAdaptorPtr sna_video_sprite_setup(struct sna *sna,
 	adaptor->PutImage = sna_video_sprite_put_image;
 	adaptor->QueryImageAttributes = sna_video_sprite_query_attrs;
 
-	video->textured = FALSE;
+	video->textured = false;
 	video->color_key = sna_video_sprite_color_key(sna);
-	video->color_key_changed = TRUE;
+	video->color_key_changed = true;
 	video->brightness = -19;	/* (255/219) * -16 */
 	video->contrast = 75;	/* 255/219 * 64 */
 	video->saturation = 146;	/* 128/112 * 128 */
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 805aee7..110bb00 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -39,7 +39,7 @@
 #include "sna_video_hwmc.h"
 #endif
 
-#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
+#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, true)
 
 static Atom xvBrightness, xvContrast, xvSyncToVblank;
 
@@ -197,7 +197,7 @@ sna_video_textured_best_size(ScrnInfoPtr scrn,
  * id is a fourcc code for the format of the video.
  * buf is the pointer to the source data in system memory.
  * width and height are the w/h of the source data.
- * If "sync" is TRUE, then we must be finished with *buf at the point of return
+ * If "sync" is true, then we must be finished with *buf at the point of return
  * (which we always are).
  * clip is the clipping region in screen space.
  * data is a pointer to our port private.
@@ -221,8 +221,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	BoxRec dstBox;
 	xf86CrtcPtr crtc;
-	Bool flush = false;
-	Bool ret;
+	bool flush = false;
+	bool ret;
 
 	DBG(("%s: src=(%d, %d),(%d, %d), dst=(%d, %d),(%d, %d), id=%d, sizep=%dx%d, sync?=%d\n",
 	     __FUNCTION__,
@@ -385,7 +385,7 @@ XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna,
 	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "cannot enable XVideo whilst the GPU is wedged\n");
-		return FALSE;
+		return NULL;
 	}
 
 	adaptor = calloc(1, sizeof(XF86VideoAdaptorRec));
@@ -448,7 +448,7 @@ XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna,
 	for (i = 0; i < nports; i++) {
 		struct sna_video *v = &video[i];
 
-		v->textured = TRUE;
+		v->textured = true;
 		v->rotation = RR_Rotate_0;
 		v->SyncToVblank = 1;
 
commit a05c3547bba52288bae872ea672ffe2f4dab2ffa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 15:06:15 2012 +0100

    sna/gen4: Simplify comparing the pipeline-pointers against the previous
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
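
    The change below replaces a dword-by-dword comparison of the freshly
    written batch entries against the last emitted 3DSTATE_PIPELINED_POINTERS
    with a single packed key stored in last_pipelined_pointers. A minimal
    sketch of that key-packing approach follows; the names state_key,
    emit_state and struct state_cache are illustrative only, not the
    driver's own:

	#include <stdbool.h>
	#include <stdint.h>

	struct state_cache {
		/* set to -1 on batch reset so the first emit never matches */
		uint32_t last_key;
	};

	static uint32_t state_key(bool has_mask, uint16_t sampler, uint16_t blend)
	{
		uint32_t key = has_mask;           /* bit 0: mask present?     */
		key |= (uint32_t)sampler << 1;     /* sampler-table offset     */
		key |= (uint32_t)blend << 16;      /* blend-state offset       */
		return key;
	}

	/* Returns true when the caller must emit the full state packet. */
	static bool emit_state(struct state_cache *cache,
			       bool has_mask, uint16_t sampler, uint16_t blend)
	{
		uint32_t key = state_key(has_mask, sampler, blend);

		if (key == cache->last_key)
			return false;   /* identical state already in the batch */

		cache->last_key = key;
		return true;
	}

    One compare against the cached key decides whether the whole packet can
    be skipped, instead of emitting it speculatively and rewinding the batch
    when the trailing dwords turn out to be unchanged.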

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index ed85554..44504c5 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1335,7 +1335,8 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     int blend, int kernel)
 {
-	uint16_t offset = sna->kgem.nbatch, last;
+	uint32_t key;
+	uint16_t sp, bp;
 
 	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
 	     __FUNCTION__, op->mask.bo != NULL,
@@ -1343,28 +1344,28 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 	     op->mask.filter, op->mask.repeat,
 	     kernel, blend, op->has_component_alpha, (int)op->dst.format));
 
+	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
+			      op->mask.filter, op->mask.repeat,
+			      kernel);
+	bp = gen4_get_blend(blend, op->has_component_alpha, op->dst.format);
+
+	key = op->mask.bo != NULL;
+	key |= sp << 1;
+	key |= bp << 16;
+
+	if (key == sna->render_state.gen4.last_pipelined_pointers)
+		return;
+
 	OUT_BATCH(GEN4_3DSTATE_PIPELINED_POINTERS | 5);
 	OUT_BATCH(sna->render_state.gen4.vs);
 	OUT_BATCH(GEN4_GS_DISABLE); /* passthrough */
 	OUT_BATCH(GEN4_CLIP_DISABLE); /* passthrough */
 	OUT_BATCH(sna->render_state.gen4.sf[op->mask.bo != NULL]);
-	OUT_BATCH(sna->render_state.gen4.wm +
-		  SAMPLER_OFFSET(op->src.filter, op->src.repeat,
-				 op->mask.filter, op->mask.repeat,
-				 kernel));
-	OUT_BATCH(sna->render_state.gen4.cc +
-		  gen4_get_blend(blend, op->has_component_alpha, op->dst.format));
-
-	last = sna->render_state.gen4.last_pipelined_pointers;
-	if (last &&
-	    sna->kgem.batch[offset + 4] == sna->kgem.batch[last + 4] &&
-	    sna->kgem.batch[offset + 5] == sna->kgem.batch[last + 5] &&
-	    sna->kgem.batch[offset + 6] == sna->kgem.batch[last + 6]) {
-		sna->kgem.nbatch = offset;
-	} else {
-		sna->render_state.gen4.last_pipelined_pointers = offset;
-		gen4_emit_urb(sna);
-	}
+	OUT_BATCH(sna->render_state.gen4.wm + sp);
+	OUT_BATCH(sna->render_state.gen4.cc + bp);
+
+	sna->render_state.gen4.last_pipelined_pointers = key;
+	gen4_emit_urb(sna);
 }
 
 static void
@@ -3240,7 +3241,7 @@ static void gen4_render_reset(struct sna *sna)
 	sna->render_state.gen4.vb_id = 0;
 	sna->render_state.gen4.ve_id = -1;
 	sna->render_state.gen4.last_primitive = -1;
-	sna->render_state.gen4.last_pipelined_pointers = 0;
+	sna->render_state.gen4.last_pipelined_pointers = -1;
 
 	sna->render_state.gen4.drawrect_offset = -1;
 	sna->render_state.gen4.drawrect_limit = -1;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 0eb7e90..fae5872 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -338,11 +338,11 @@ struct gen4_render_state {
 	uint32_t drawrect_offset;
 	uint32_t drawrect_limit;
 	uint32_t vb_id;
+	uint32_t last_pipelined_pointers;
 	uint16_t vertex_offset;
 	uint16_t last_primitive;
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
-	uint16_t last_pipelined_pointers;
 
 	Bool needs_invariant;
 	Bool needs_urb;

