[Mesa-dev] [PATCH 9/9] radeonsi: rename r600_texture::resource to buffer
Marek Olšák
maraeo at gmail.com
Tue Apr 17 00:42:11 UTC 2018
From: Marek Olšák <marek.olsak at amd.com>
This frees up the name so that r600_resource itself can later be renamed to si_buffer.
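The rename is mechanical; every access to the embedded r600_resource member
changes the same way. A minimal sketch, using hypothetical trimmed-down types
for illustration only (the real definitions live in si_pipe.h):

    #include <stdint.h>

    struct r600_resource {
            uint64_t gpu_address;
            /* winsys buffer, usage counters, ... */
    };

    struct r600_texture {
            struct r600_resource buffer; /* was: struct r600_resource resource; */
            /* surface, fmask, cmask, ... */
    };

    /* Every access site changes the same way: */
    static uint64_t tex_va(struct r600_texture *rtex)
    {
            return rtex->buffer.gpu_address; /* was: rtex->resource.gpu_address */
    }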
---
 src/gallium/drivers/radeon/radeon_vcn_dec.c   |   2 +-
 src/gallium/drivers/radeonsi/cik_sdma.c       |  22 +--
 src/gallium/drivers/radeonsi/si_blit.c        |  50 +++---
 src/gallium/drivers/radeonsi/si_clear.c       |  36 ++--
 src/gallium/drivers/radeonsi/si_descriptors.c |  24 +--
 src/gallium/drivers/radeonsi/si_dma.c         |  16 +-
 src/gallium/drivers/radeonsi/si_pipe.h        |   4 +-
 src/gallium/drivers/radeonsi/si_state.c       |  70 ++++----
 src/gallium/drivers/radeonsi/si_texture.c     | 168 +++++++++---------
 src/gallium/drivers/radeonsi/si_uvd.c         |  10 +-
 10 files changed, 201 insertions(+), 201 deletions(-)
diff --git a/src/gallium/drivers/radeon/radeon_vcn_dec.c b/src/gallium/drivers/radeon/radeon_vcn_dec.c
index 46ad2853f1c..cee32d482f2 100644
--- a/src/gallium/drivers/radeon/radeon_vcn_dec.c
+++ b/src/gallium/drivers/radeon/radeon_vcn_dec.c
@@ -979,21 +979,21 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
break;
}
default:
assert(0);
return NULL;
}
if (dec->ctx.res)
decode->hw_ctxt_size = dec->ctx.res->buf->size;
- return luma->resource.buf;
+ return luma->buffer.buf;
}
static void rvcn_dec_message_destroy(struct radeon_decoder *dec)
{
rvcn_dec_message_header_t *header = dec->msg;
memset(dec->msg, 0, sizeof(rvcn_dec_message_header_t));
header->header_size = sizeof(rvcn_dec_message_header_t);
header->total_size = sizeof(rvcn_dec_message_header_t) -
sizeof(rvcn_dec_message_index_t);
diff --git a/src/gallium/drivers/radeonsi/cik_sdma.c b/src/gallium/drivers/radeonsi/cik_sdma.c
index 690e7ff5499..7a4b479b7eb 100644
--- a/src/gallium/drivers/radeonsi/cik_sdma.c
+++ b/src/gallium/drivers/radeonsi/cik_sdma.c
@@ -140,63 +140,63 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
unsigned dst_level,
unsigned dstx, unsigned dsty, unsigned dstz,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box)
{
struct radeon_info *info = &sctx->screen->info;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned bpp = rdst->surface.bpe;
- uint64_t dst_address = rdst->resource.gpu_address +
+ uint64_t dst_address = rdst->buffer.gpu_address +
rdst->surface.u.legacy.level[dst_level].offset;
- uint64_t src_address = rsrc->resource.gpu_address +
+ uint64_t src_address = rsrc->buffer.gpu_address +
rsrc->surface.u.legacy.level[src_level].offset;
unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
unsigned src_mode = rsrc->surface.u.legacy.level[src_level].mode;
unsigned dst_tile_index = rdst->surface.u.legacy.tiling_index[dst_level];
unsigned src_tile_index = rsrc->surface.u.legacy.tiling_index[src_level];
unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
unsigned dst_tile_swizzle = dst_mode == RADEON_SURF_MODE_2D ?
rdst->surface.tile_swizzle : 0;
unsigned src_tile_swizzle = src_mode == RADEON_SURF_MODE_2D ?
rsrc->surface.tile_swizzle : 0;
unsigned dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x;
unsigned src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x;
uint64_t dst_slice_pitch = ((uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
uint64_t src_slice_pitch = ((uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
- unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
+ unsigned dst_width = minify_as_blocks(rdst->buffer.b.b.width0,
dst_level, rdst->surface.blk_w);
- unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
+ unsigned src_width = minify_as_blocks(rsrc->buffer.b.b.width0,
src_level, rsrc->surface.blk_w);
- unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
+ unsigned dst_height = minify_as_blocks(rdst->buffer.b.b.height0,
dst_level, rdst->surface.blk_h);
- unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
+ unsigned src_height = minify_as_blocks(rsrc->buffer.b.b.height0,
src_level, rsrc->surface.blk_h);
unsigned srcx = src_box->x / rsrc->surface.blk_w;
unsigned srcy = src_box->y / rsrc->surface.blk_h;
unsigned srcz = src_box->z;
unsigned copy_width = DIV_ROUND_UP(src_box->width, rsrc->surface.blk_w);
unsigned copy_height = DIV_ROUND_UP(src_box->height, rsrc->surface.blk_h);
unsigned copy_depth = src_box->depth;
assert(src_level <= src->last_level);
assert(dst_level <= dst->last_level);
assert(rdst->surface.u.legacy.level[dst_level].offset +
dst_slice_pitch * bpp * (dstz + src_box->depth) <=
- rdst->resource.buf->size);
+ rdst->buffer.buf->size);
assert(rsrc->surface.u.legacy.level[src_level].offset +
src_slice_pitch * bpp * (srcz + src_box->depth) <=
- rsrc->resource.buf->size);
+ rsrc->buffer.buf->size);
if (!si_prepare_for_dma_blit(sctx, rdst, dst_level, dstx, dsty,
dstz, rsrc, src_level, src_box))
return false;
dstx /= rdst->surface.blk_w;
dsty /= rdst->surface.blk_h;
if (srcx >= (1 << 14) ||
srcy >= (1 << 14) ||
@@ -225,21 +225,21 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
(copy_width < (1 << 14) &&
copy_height < (1 << 14) &&
copy_depth < (1 << 11))) &&
/* HW limitation - some CIK parts: */
((sctx->family != CHIP_BONAIRE &&
sctx->family != CHIP_KAVERI) ||
(srcx + copy_width != (1 << 14) &&
srcy + copy_height != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 13, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 13, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
(util_logbase2(bpp) << 29));
radeon_emit(cs, src_address);
radeon_emit(cs, src_address >> 32);
radeon_emit(cs, srcx | (srcy << 16));
radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
radeon_emit(cs, src_slice_pitch - 1);
radeon_emit(cs, dst_address);
@@ -388,21 +388,21 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
pitch_tile_max < (1 << 11) &&
slice_tile_max < (1 << 22) &&
linear_pitch <= (1 << 14) &&
linear_slice_pitch <= (1 << 28) &&
copy_width_aligned <= (1 << 14) &&
copy_height <= (1 << 14) &&
copy_depth <= (1 << 11)) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
uint32_t direction = linear == rdst ? 1u << 31 : 0;
- si_need_dma_space(sctx, 14, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 14, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
direction);
radeon_emit(cs, tiled_address);
radeon_emit(cs, tiled_address >> 32);
radeon_emit(cs, tiled_x | (tiled_y << 16));
radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
radeon_emit(cs, slice_tile_max);
radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
@@ -482,21 +482,21 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
/* HW limitation - some CIK parts: */
((sctx->family != CHIP_BONAIRE &&
sctx->family != CHIP_KAVERI &&
sctx->family != CHIP_KABINI &&
sctx->family != CHIP_MULLINS) ||
(srcx + copy_width_aligned != (1 << 14) &&
srcy + copy_height_aligned != (1 << 14) &&
dstx + copy_width != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 15, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 15, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
radeon_emit(cs, src_address);
radeon_emit(cs, src_address >> 32);
radeon_emit(cs, srcx | (srcy << 16));
radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
radeon_emit(cs, src_slice_tile_max);
radeon_emit(cs, encode_tile_info(sctx, rsrc, src_level, true));
radeon_emit(cs, dst_address);
diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c
index bd20a900e69..1cbd26f46e0 100644
--- a/src/gallium/drivers/radeonsi/si_blit.c
+++ b/src/gallium/drivers/radeonsi/si_blit.c
@@ -113,55 +113,55 @@ si_blit_dbcb_copy(struct si_context *sctx,
assert(sctx->dbcb_depth_copy_enabled || sctx->dbcb_stencil_copy_enabled);
sctx->decompression_enabled = true;
while (level_mask) {
unsigned level = u_bit_scan(&level_mask);
/* The smaller the mipmap level, the fewer layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&src->resource.b.b, level);
+ max_layer = util_max_layer(&src->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
surf_tmpl.u.tex.level = level;
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *zsurf, *cbsurf;
- surf_tmpl.format = src->resource.b.b.format;
+ surf_tmpl.format = src->buffer.b.b.format;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- zsurf = sctx->b.create_surface(&sctx->b, &src->resource.b.b, &surf_tmpl);
+ zsurf = sctx->b.create_surface(&sctx->b, &src->buffer.b.b, &surf_tmpl);
- surf_tmpl.format = dst->resource.b.b.format;
- cbsurf = sctx->b.create_surface(&sctx->b, &dst->resource.b.b, &surf_tmpl);
+ surf_tmpl.format = dst->buffer.b.b.format;
+ cbsurf = sctx->b.create_surface(&sctx->b, &dst->buffer.b.b, &surf_tmpl);
for (sample = first_sample; sample <= last_sample; sample++) {
if (sample != sctx->dbcb_copy_sample) {
sctx->dbcb_copy_sample = sample;
si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
}
si_blitter_begin(sctx, SI_DECOMPRESS);
util_blitter_custom_depth_stencil(sctx->blitter, zsurf, cbsurf, 1 << sample,
sctx->custom_dsa_flush, 1.0f);
si_blitter_end(sctx);
}
pipe_surface_reference(&zsurf, NULL);
pipe_surface_reference(&cbsurf, NULL);
}
if (first_layer == 0 && last_layer >= max_layer &&
- first_sample == 0 && last_sample >= u_max_sample(&src->resource.b.b))
+ first_sample == 0 && last_sample >= u_max_sample(&src->buffer.b.b))
fully_copied_levels |= 1u << level;
}
sctx->decompression_enabled = false;
sctx->dbcb_depth_copy_enabled = false;
sctx->dbcb_stencil_copy_enabled = false;
si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
return fully_copied_levels;
}
@@ -171,21 +171,21 @@ void si_blit_decompress_depth(struct pipe_context *ctx,
struct r600_texture *staging,
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer,
unsigned first_sample, unsigned last_sample)
{
const struct util_format_description *desc;
unsigned planes = 0;
assert(staging != NULL && "use si_blit_decompress_zs_in_place instead");
- desc = util_format_description(staging->resource.b.b.format);
+ desc = util_format_description(staging->buffer.b.b.format);
if (util_format_has_depth(desc))
planes |= PIPE_MASK_Z;
if (util_format_has_stencil(desc))
planes |= PIPE_MASK_S;
si_blit_dbcb_copy(
(struct si_context *)ctx, texture, staging, planes,
u_bit_consecutive(first_level, last_level - first_level + 1),
first_layer, last_layer, first_sample, last_sample);
@@ -205,39 +205,39 @@ si_blit_decompress_zs_planes_in_place(struct si_context *sctx,
if (!level_mask)
return;
if (planes & PIPE_MASK_S)
sctx->db_flush_stencil_inplace = true;
if (planes & PIPE_MASK_Z)
sctx->db_flush_depth_inplace = true;
si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
- surf_tmpl.format = texture->resource.b.b.format;
+ surf_tmpl.format = texture->buffer.b.b.format;
sctx->decompression_enabled = true;
while (level_mask) {
unsigned level = u_bit_scan(&level_mask);
surf_tmpl.u.tex.level = level;
/* The smaller the mipmap level, the fewer layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&texture->resource.b.b, level);
+ max_layer = util_max_layer(&texture->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
for (layer = first_layer; layer <= checked_last_layer; layer++) {
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- zsurf = sctx->b.create_surface(&sctx->b, &texture->resource.b.b, &surf_tmpl);
+ zsurf = sctx->b.create_surface(&sctx->b, &texture->buffer.b.b, &surf_tmpl);
si_blitter_begin(sctx, SI_DECOMPRESS);
util_blitter_custom_depth_stencil(sctx->blitter, zsurf, NULL, ~0,
sctx->custom_dsa_flush,
1.0f);
si_blitter_end(sctx);
pipe_surface_reference(&zsurf, NULL);
}
@@ -334,43 +334,43 @@ si_decompress_depth(struct si_context *sctx,
u_log_printf(sctx->log,
"\n------------------------------------------------\n"
"Decompress Depth (levels %u - %u, levels Z: 0x%x S: 0x%x)\n\n",
first_level, last_level, levels_z, levels_s);
/* We may have to allocate the flushed texture here when called from
* si_decompress_subresource.
*/
if (copy_planes &&
(tex->flushed_depth_texture ||
- si_init_flushed_depth_texture(&sctx->b, &tex->resource.b.b, NULL))) {
+ si_init_flushed_depth_texture(&sctx->b, &tex->buffer.b.b, NULL))) {
struct r600_texture *dst = tex->flushed_depth_texture;
unsigned fully_copied_levels;
unsigned levels = 0;
assert(tex->flushed_depth_texture);
- if (util_format_is_depth_and_stencil(dst->resource.b.b.format))
+ if (util_format_is_depth_and_stencil(dst->buffer.b.b.format))
copy_planes = PIPE_MASK_Z | PIPE_MASK_S;
if (copy_planes & PIPE_MASK_Z) {
levels |= levels_z;
levels_z = 0;
}
if (copy_planes & PIPE_MASK_S) {
levels |= levels_s;
levels_s = 0;
}
fully_copied_levels = si_blit_dbcb_copy(
sctx, tex, dst, copy_planes, levels,
first_layer, last_layer,
- 0, u_max_sample(&tex->resource.b.b));
+ 0, u_max_sample(&tex->buffer.b.b));
if (copy_planes & PIPE_MASK_Z)
tex->dirty_level_mask &= ~fully_copied_levels;
if (copy_planes & PIPE_MASK_S)
tex->stencil_dirty_level_mask &= ~fully_copied_levels;
}
if (inplace_planes) {
bool has_htile = si_htile_enabled(tex, first_level);
bool tc_compat_htile = vi_tc_compat_htile_enabled(tex, first_level);
@@ -391,29 +391,29 @@ si_decompress_depth(struct si_context *sctx,
*/
if (inplace_planes & PIPE_MASK_Z)
tex->dirty_level_mask &= ~levels_z;
if (inplace_planes & PIPE_MASK_S)
tex->stencil_dirty_level_mask &= ~levels_s;
}
/* Only in-place decompression needs to flush DB caches, or
* when we don't decompress but TC-compatible planes are dirty.
*/
- si_make_DB_shader_coherent(sctx, tex->resource.b.b.nr_samples,
+ si_make_DB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
inplace_planes & PIPE_MASK_S,
tc_compat_htile);
}
/* set_framebuffer_state takes care of coherency for single-sample.
* The DB->CB copy uses CB for the final writes.
*/
- if (copy_planes && tex->resource.b.b.nr_samples > 1)
- si_make_CB_shader_coherent(sctx, tex->resource.b.b.nr_samples,
+ if (copy_planes && tex->buffer.b.b.nr_samples > 1)
+ si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
false);
}
static void
si_decompress_sampler_depth_textures(struct si_context *sctx,
struct si_samplers *textures)
{
unsigned i;
unsigned mask = textures->needs_depth_decompress_mask;
@@ -427,21 +427,21 @@ si_decompress_sampler_depth_textures(struct si_context *sctx,
view = textures->views[i];
assert(view);
sview = (struct si_sampler_view*)view;
tex = (struct r600_texture *)view->texture;
assert(tex->db_compatible);
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
view->u.tex.first_level, view->u.tex.last_level,
- 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
+ 0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
}
}
static void si_blit_decompress_color(struct si_context *sctx,
struct r600_texture *rtex,
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer,
bool need_dcc_decompress)
{
void* custom_blend;
@@ -476,31 +476,31 @@ static void si_blit_decompress_color(struct si_context *sctx,
custom_blend = sctx->custom_blend_eliminate_fastclear;
}
sctx->decompression_enabled = true;
while (level_mask) {
unsigned level = u_bit_scan(&level_mask);
/* The smaller the mipmap level, the fewer layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&rtex->resource.b.b, level);
+ max_layer = util_max_layer(&rtex->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *cbsurf, surf_tmpl;
- surf_tmpl.format = rtex->resource.b.b.format;
+ surf_tmpl.format = rtex->buffer.b.b.format;
surf_tmpl.u.tex.level = level;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- cbsurf = sctx->b.create_surface(&sctx->b, &rtex->resource.b.b, &surf_tmpl);
+ cbsurf = sctx->b.create_surface(&sctx->b, &rtex->buffer.b.b, &surf_tmpl);
/* Required before and after FMASK and DCC_DECOMPRESS. */
if (custom_blend == sctx->custom_blend_fmask_decompress ||
custom_blend == sctx->custom_blend_dcc_decompress)
sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB;
si_blitter_begin(sctx, SI_DECOMPRESS);
util_blitter_custom_color(sctx->blitter, cbsurf, custom_blend);
si_blitter_end(sctx);
@@ -512,34 +512,34 @@ static void si_blit_decompress_color(struct si_context *sctx,
}
/* The texture will always be dirty if some layers aren't flushed.
* I don't think this case occurs often though. */
if (first_layer == 0 && last_layer >= max_layer) {
rtex->dirty_level_mask &= ~(1 << level);
}
}
sctx->decompression_enabled = false;
- si_make_CB_shader_coherent(sctx, rtex->resource.b.b.nr_samples,
+ si_make_CB_shader_coherent(sctx, rtex->buffer.b.b.nr_samples,
vi_dcc_enabled(rtex, first_level));
}
static void
si_decompress_color_texture(struct si_context *sctx, struct r600_texture *tex,
unsigned first_level, unsigned last_level)
{
/* CMASK or DCC can be discarded and we can still end up here. */
if (!tex->cmask.size && !tex->fmask.size && !tex->dcc_offset)
return;
si_blit_decompress_color(sctx, tex, first_level, last_level, 0,
- util_max_layer(&tex->resource.b.b, first_level),
+ util_max_layer(&tex->buffer.b.b, first_level),
false);
}
static void
si_decompress_sampler_color_textures(struct si_context *sctx,
struct si_samplers *textures)
{
unsigned i;
unsigned mask = textures->needs_color_decompress_mask;
@@ -743,21 +743,21 @@ static void si_decompress_resident_textures(struct si_context *sctx)
util_dynarray_foreach(&sctx->resident_tex_needs_depth_decompress,
struct si_texture_handle *, tex_handle) {
struct pipe_sampler_view *view = (*tex_handle)->view;
struct si_sampler_view *sview = (struct si_sampler_view *)view;
struct r600_texture *tex = (struct r600_texture *)view->texture;
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
view->u.tex.first_level, view->u.tex.last_level,
- 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
+ 0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
}
}
static void si_decompress_resident_images(struct si_context *sctx)
{
util_dynarray_foreach(&sctx->resident_img_needs_color_decompress,
struct si_image_handle *, img_handle) {
struct pipe_image_view *view = &(*img_handle)->view;
struct r600_texture *tex = (struct r600_texture *)view->resource;
@@ -1321,22 +1321,22 @@ static void si_flush_resource(struct pipe_context *ctx,
rtex->separate_dcc_dirty = false;
vi_separate_dcc_process_and_reset_stats(ctx, rtex);
}
}
void si_decompress_dcc(struct si_context *sctx, struct r600_texture *rtex)
{
if (!rtex->dcc_offset)
return;
- si_blit_decompress_color(sctx, rtex, 0, rtex->resource.b.b.last_level,
- 0, util_max_layer(&rtex->resource.b.b, 0),
+ si_blit_decompress_color(sctx, rtex, 0, rtex->buffer.b.b.last_level,
+ 0, util_max_layer(&rtex->buffer.b.b, 0),
true);
}
void si_init_blit_functions(struct si_context *sctx)
{
sctx->b.resource_copy_region = si_resource_copy_region;
sctx->b.blit = si_blit;
sctx->b.flush_resource = si_flush_resource;
sctx->b.generate_mipmap = si_generate_mipmap;
}
diff --git a/src/gallium/drivers/radeonsi/si_clear.c b/src/gallium/drivers/radeonsi/si_clear.c
index d25e2c713f6..34df1138a6b 100644
--- a/src/gallium/drivers/radeonsi/si_clear.c
+++ b/src/gallium/drivers/radeonsi/si_clear.c
@@ -227,66 +227,66 @@ void vi_dcc_clear_level(struct si_context *sctx,
{
struct pipe_resource *dcc_buffer;
uint64_t dcc_offset, clear_size;
assert(vi_dcc_enabled(rtex, level));
if (rtex->dcc_separate_buffer) {
dcc_buffer = &rtex->dcc_separate_buffer->b.b;
dcc_offset = 0;
} else {
- dcc_buffer = &rtex->resource.b.b;
+ dcc_buffer = &rtex->buffer.b.b;
dcc_offset = rtex->dcc_offset;
}
if (sctx->chip_class >= GFX9) {
/* Mipmap level clears aren't implemented. */
- assert(rtex->resource.b.b.last_level == 0);
+ assert(rtex->buffer.b.b.last_level == 0);
/* 4x and 8x MSAA needs a sophisticated compute shader for
* the clear. See AMDVLK. */
- assert(rtex->resource.b.b.nr_samples <= 2);
+ assert(rtex->buffer.b.b.nr_samples <= 2);
clear_size = rtex->surface.dcc_size;
} else {
- unsigned num_layers = util_num_layers(&rtex->resource.b.b, level);
+ unsigned num_layers = util_num_layers(&rtex->buffer.b.b, level);
/* If this is 0, fast clear isn't possible. (can occur with MSAA) */
assert(rtex->surface.u.legacy.level[level].dcc_fast_clear_size);
/* Layered 4x and 8x MSAA DCC fast clears need to clear
* dcc_fast_clear_size bytes for each layer. A compute shader
* would be more efficient than separate per-layer clear operations.
*/
- assert(rtex->resource.b.b.nr_samples <= 2 || num_layers == 1);
+ assert(rtex->buffer.b.b.nr_samples <= 2 || num_layers == 1);
dcc_offset += rtex->surface.u.legacy.level[level].dcc_offset;
clear_size = rtex->surface.u.legacy.level[level].dcc_fast_clear_size *
num_layers;
}
si_clear_buffer(sctx, dcc_buffer, dcc_offset, clear_size,
clear_value, SI_COHERENCY_CB_META);
}
/* Set the same micro tile mode as the destination of the last MSAA resolve.
* This allows hitting the MSAA resolve fast path, which requires that both
* src and dst micro tile modes match.
*/
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen,
struct r600_texture *rtex)
{
- if (rtex->resource.b.is_shared ||
- rtex->resource.b.b.nr_samples <= 1 ||
+ if (rtex->buffer.b.is_shared ||
+ rtex->buffer.b.b.nr_samples <= 1 ||
rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
return;
assert(sscreen->info.chip_class >= GFX9 ||
rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
- assert(rtex->resource.b.b.last_level == 0);
+ assert(rtex->buffer.b.b.last_level == 0);
if (sscreen->info.chip_class >= GFX9) {
/* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
assert(rtex->surface.u.gfx9.surf.swizzle_mode >= 4);
/* If you do swizzle_mode % 4, you'll get:
* 0 = Depth
* 1 = Standard,
* 2 = Displayable
* 3 = Rotated
@@ -404,40 +404,40 @@ static void si_do_fast_color_clear(struct si_context *sctx,
continue;
tex = (struct r600_texture *)fb->cbufs[i]->texture;
/* TODO: GFX9: Implement DCC fast clear for level 0 of
* mipmapped textures. Mipmapped DCC has to clear a rectangular
* area of DCC for level 0 (because the whole miptree is
* organized in a 2D plane).
*/
if (sctx->chip_class >= GFX9 &&
- tex->resource.b.b.last_level > 0)
+ tex->buffer.b.b.last_level > 0)
continue;
/* the clear is allowed if all layers are bound */
if (fb->cbufs[i]->u.tex.first_layer != 0 ||
- fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
+ fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
continue;
}
/* only supported on tiled surfaces */
if (tex->surface.is_linear) {
continue;
}
/* shared textures can't use fast clear without an explicit flush,
* because there is no way to communicate the clear color among
* all clients
*/
- if (tex->resource.b.is_shared &&
- !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
+ if (tex->buffer.b.is_shared &&
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
continue;
/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
if (sctx->chip_class == CIK &&
tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
sctx->screen->info.drm_major == 2 &&
sctx->screen->info.drm_minor < 38) {
continue;
}
@@ -459,48 +459,48 @@ static void si_do_fast_color_clear(struct si_context *sctx,
}
bool need_decompress_pass = false;
/* Use a slow clear for small surfaces where the cost of
* the eliminate pass can be higher than the benefit of fast
* clear. The closed driver does this, but the numbers may differ.
*
* This helps on both dGPUs and APUs, even small APUs like Mullins.
*/
- bool too_small = tex->resource.b.b.nr_samples <= 1 &&
- tex->resource.b.b.width0 *
- tex->resource.b.b.height0 <= 512 * 512;
+ bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
+ tex->buffer.b.b.width0 *
+ tex->buffer.b.b.height0 <= 512 * 512;
/* Try to clear DCC first, otherwise try CMASK. */
if (vi_dcc_enabled(tex, 0)) {
uint32_t reset_value;
bool eliminate_needed;
if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
continue;
/* This can only occur with MSAA. */
if (sctx->chip_class == VI &&
!tex->surface.u.legacy.level[level].dcc_fast_clear_size)
continue;
- if (!vi_get_fast_clear_parameters(tex->resource.b.b.format,
+ if (!vi_get_fast_clear_parameters(tex->buffer.b.b.format,
fb->cbufs[i]->format,
color, &reset_value,
&eliminate_needed))
continue;
if (eliminate_needed && too_small)
continue;
/* DCC fast clear with MSAA should clear CMASK to 0xC. */
- if (tex->resource.b.b.nr_samples >= 2 && tex->cmask.size) {
+ if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask.size) {
/* TODO: This doesn't work with MSAA. */
if (eliminate_needed)
continue;
si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
tex->cmask.offset, tex->cmask.size,
0xCCCCCCCC, SI_COHERENCY_CB_META);
need_decompress_pass = true;
}
@@ -578,21 +578,21 @@ static void si_clear(struct pipe_context *ctx, unsigned buffers,
tex = (struct r600_texture *)fb->cbufs[i]->texture;
if (tex->fmask.size == 0)
tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
}
}
if (zstex &&
si_htile_enabled(zstex, zsbuf->u.tex.level) &&
zsbuf->u.tex.first_layer == 0 &&
- zsbuf->u.tex.last_layer == util_max_layer(&zstex->resource.b.b, 0)) {
+ zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
/* TC-compatible HTILE only supports depth clears to 0 or 1. */
if (buffers & PIPE_CLEAR_DEPTH &&
(!zstex->tc_compatible_htile ||
depth == 0 || depth == 1)) {
/* Need to disable EXPCLEAR temporarily if clearing
* to a new value. */
if (!zstex->depth_cleared || zstex->depth_clear_value != depth) {
sctx->db_depth_disable_expclear = true;
}
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index 6771b62a9fb..9f2433a5b72 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -252,21 +252,21 @@ static void si_sampler_view_add_buffer(struct si_context *sctx,
struct r600_texture *rtex;
enum radeon_bo_priority priority;
if (!resource)
return;
if (resource->target != PIPE_BUFFER) {
struct r600_texture *tex = (struct r600_texture*)resource;
if (tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
- resource = &tex->flushed_depth_texture->resource.b.b;
+ resource = &tex->flushed_depth_texture->buffer.b.b;
}
rres = r600_resource(resource);
priority = si_get_sampler_view_priority(rres);
radeon_add_to_gfx_buffer_list_check_mem(sctx, rres, usage, priority,
check_mem);
if (resource->target == PIPE_BUFFER)
return;
@@ -323,21 +323,21 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
unsigned block_width, bool is_stencil,
uint32_t *state)
{
uint64_t va, meta_va = 0;
if (tex->is_depth && !si_can_sample_zs(tex, is_stencil)) {
tex = tex->flushed_depth_texture;
is_stencil = false;
}
- va = tex->resource.gpu_address;
+ va = tex->buffer.gpu_address;
if (sscreen->info.chip_class >= GFX9) {
/* Only stencil_offset needs to be added here. */
if (is_stencil)
va += tex->surface.u.gfx9.stencil_offset;
else
va += tex->surface.u.gfx9.surf_offset;
} else {
va += base_level_info->offset;
}
@@ -351,31 +351,31 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
*/
if (sscreen->info.chip_class >= GFX9 ||
base_level_info->mode == RADEON_SURF_MODE_2D)
state[0] |= tex->surface.tile_swizzle;
if (sscreen->info.chip_class >= VI) {
state[6] &= C_008F28_COMPRESSION_EN;
state[7] = 0;
if (vi_dcc_enabled(tex, first_level)) {
- meta_va = (!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
tex->dcc_offset;
if (sscreen->info.chip_class == VI) {
meta_va += base_level_info->dcc_offset;
assert(base_level_info->mode == RADEON_SURF_MODE_2D);
}
meta_va |= (uint32_t)tex->surface.tile_swizzle << 8;
} else if (vi_tc_compat_htile_enabled(tex, first_level)) {
- meta_va = tex->resource.gpu_address + tex->htile_offset;
+ meta_va = tex->buffer.gpu_address + tex->htile_offset;
}
if (meta_va) {
state[6] |= S_008F28_COMPRESSION_EN(1);
state[7] = meta_va >> 8;
}
}
if (sscreen->info.chip_class >= GFX9) {
state[3] &= C_008F1C_SW_MODE;
@@ -430,35 +430,35 @@ static void si_set_sampler_state_desc(struct si_sampler_state *sstate,
memcpy(desc, sstate->val, 4*4);
}
static void si_set_sampler_view_desc(struct si_context *sctx,
struct si_sampler_view *sview,
struct si_sampler_state *sstate,
uint32_t *desc)
{
struct pipe_sampler_view *view = &sview->base;
struct r600_texture *rtex = (struct r600_texture *)view->texture;
- bool is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
+ bool is_buffer = rtex->buffer.b.b.target == PIPE_BUFFER;
if (unlikely(!is_buffer && sview->dcc_incompatible)) {
if (vi_dcc_enabled(rtex, view->u.tex.first_level))
if (!si_texture_disable_dcc(sctx, rtex))
si_decompress_dcc(sctx, rtex);
sview->dcc_incompatible = false;
}
assert(rtex); /* views with texture == NULL aren't supported */
memcpy(desc, sview->state, 8*4);
if (is_buffer) {
- si_set_buf_desc_address(&rtex->resource,
+ si_set_buf_desc_address(&rtex->buffer,
sview->base.u.buf.offset,
desc + 4);
} else {
bool is_separate_stencil = rtex->db_compatible &&
sview->is_stencil_sampler;
si_set_mutable_tex_desc_fields(sctx->screen, rtex,
sview->base_level_info,
sview->base_level,
sview->base.u.tex.first_level,
@@ -510,22 +510,22 @@ static void si_set_sampler_view(struct si_context *sctx,
if (samplers->views[slot] == view && !disallow_early_out)
return;
if (view) {
struct r600_texture *rtex = (struct r600_texture *)view->texture;
si_set_sampler_view_desc(sctx, rview,
samplers->sampler_states[slot], desc);
- if (rtex->resource.b.b.target == PIPE_BUFFER) {
- rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ if (rtex->buffer.b.b.target == PIPE_BUFFER) {
+ rtex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
samplers->needs_depth_decompress_mask &= ~(1u << slot);
samplers->needs_color_decompress_mask &= ~(1u << slot);
} else {
if (depth_needs_decompression(rtex)) {
samplers->needs_depth_decompress_mask |= 1u << slot;
} else {
samplers->needs_depth_decompress_mask &= ~(1u << slot);
}
if (color_needs_decompression(rtex)) {
samplers->needs_color_decompress_mask |= 1u << slot;
@@ -899,42 +899,42 @@ void si_update_ps_colorbuf0_slot(struct si_context *sctx)
struct pipe_image_view view;
assert(tex);
assert(!tex->is_depth);
/* Disable DCC, because the texture is used as both a sampler
* and color buffer.
*/
si_texture_disable_dcc(sctx, tex);
- if (tex->resource.b.b.nr_samples <= 1 && tex->cmask_buffer) {
+ if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
/* Disable CMASK. */
- assert(tex->cmask_buffer != &tex->resource);
+ assert(tex->cmask_buffer != &tex->buffer);
si_eliminate_fast_color_clear(sctx, tex);
si_texture_discard_cmask(sctx->screen, tex);
}
view.resource = surf->texture;
view.format = surf->format;
view.access = PIPE_IMAGE_ACCESS_READ;
view.u.tex.first_layer = surf->u.tex.first_layer;
view.u.tex.last_layer = surf->u.tex.last_layer;
view.u.tex.level = surf->u.tex.level;
/* Set the descriptor. */
uint32_t *desc = descs->list + slot*4;
memset(desc, 0, 16 * 4);
si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
- pipe_resource_reference(&buffers->buffers[slot], &tex->resource.b.b);
+ pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &tex->resource, RADEON_USAGE_READ,
+ &tex->buffer, RADEON_USAGE_READ,
RADEON_PRIO_SHADER_RW_IMAGE);
buffers->enabled_mask |= 1u << slot;
} else {
/* Clear the descriptor. */
memset(descs->list + slot*4, 0, 8*4);
pipe_resource_reference(&buffers->buffers[slot], NULL);
buffers->enabled_mask &= ~(1u << slot);
}
sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
diff --git a/src/gallium/drivers/radeonsi/si_dma.c b/src/gallium/drivers/radeonsi/si_dma.c
index 909c301d9f8..7bdee525be1 100644
--- a/src/gallium/drivers/radeonsi/si_dma.c
+++ b/src/gallium/drivers/radeonsi/si_dma.c
@@ -156,49 +156,49 @@ static void si_dma_copy_tile(struct si_context *ctx,
lbpp = util_logbase2(bpp);
pitch_tile_max = ((pitch / bpp) / 8) - 1;
linear_x = detile ? dst_x : src_x;
linear_y = detile ? dst_y : src_y;
linear_z = detile ? dst_z : src_z;
tiled_x = detile ? src_x : dst_x;
tiled_y = detile ? src_y : dst_y;
tiled_z = detile ? src_z : dst_z;
- assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));
+ assert(!util_format_is_depth_and_stencil(rtiled->buffer.b.b.format));
array_mode = G_009910_ARRAY_MODE(tile_mode);
slice_tile_max = (rtiled->surface.u.legacy.level[tiled_lvl].nblk_x *
rtiled->surface.u.legacy.level[tiled_lvl].nblk_y) / (8*8) - 1;
/* linear height must be the same as the slice tile max height, it's ok even
* if the linear destination/source have smaller height as the size of the
* dma packet will be using the copy_height which is always smaller or equal
* to the linear height
*/
height = rtiled->surface.u.legacy.level[tiled_lvl].nblk_y;
base = rtiled->surface.u.legacy.level[tiled_lvl].offset;
addr = rlinear->surface.u.legacy.level[linear_lvl].offset;
addr += (uint64_t)rlinear->surface.u.legacy.level[linear_lvl].slice_size_dw * 4 * linear_z;
addr += linear_y * pitch + linear_x * bpp;
bank_h = G_009910_BANK_HEIGHT(tile_mode);
bank_w = G_009910_BANK_WIDTH(tile_mode);
mt_aspect = G_009910_MACRO_TILE_ASPECT(tile_mode);
/* Non-depth modes don't have TILE_SPLIT set. */
tile_split = util_logbase2(rtiled->surface.u.legacy.tile_split >> 6);
nbanks = G_009910_NUM_BANKS(tile_mode);
- base += rtiled->resource.gpu_address;
- addr += rlinear->resource.gpu_address;
+ base += rtiled->buffer.gpu_address;
+ addr += rlinear->buffer.gpu_address;
pipe_config = G_009910_PIPE_CONFIG(tile_mode);
mt = G_009910_MICRO_TILE_MODE(tile_mode);
size = copy_height * pitch;
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(ctx, ncopy * 9, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(ctx, ncopy * 9, &rdst->buffer, &rsrc->buffer);
for (i = 0; i < ncopy; i++) {
cheight = copy_height;
if (cheight * pitch > SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE) {
cheight = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE / pitch;
}
size = cheight * pitch;
radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, size / 4));
radeon_emit(cs, base >> 8);
radeon_emit(cs, (detile << 31) | (array_mode << 27) |
@@ -264,30 +264,30 @@ static void si_dma_copy(struct pipe_context *ctx,
goto fallback;
src_x = util_format_get_nblocksx(src->format, src_box->x);
dst_x = util_format_get_nblocksx(src->format, dst_x);
src_y = util_format_get_nblocksy(src->format, src_box->y);
dst_y = util_format_get_nblocksy(src->format, dst_y);
bpp = rdst->surface.bpe;
dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
- src_w = u_minify(rsrc->resource.b.b.width0, src_level);
- dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
+ src_w = u_minify(rsrc->buffer.b.b.width0, src_level);
+ dst_w = u_minify(rdst->buffer.b.b.width0, dst_level);
dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
src_mode = rsrc->surface.u.legacy.level[src_level].mode;
if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
src_box->width != src_w ||
- src_box->height != u_minify(rsrc->resource.b.b.height0, src_level) ||
- src_box->height != u_minify(rdst->resource.b.b.height0, dst_level) ||
+ src_box->height != u_minify(rsrc->buffer.b.b.height0, src_level) ||
+ src_box->height != u_minify(rdst->buffer.b.b.height0, dst_level) ||
rsrc->surface.u.legacy.level[src_level].nblk_y !=
rdst->surface.u.legacy.level[dst_level].nblk_y) {
/* FIXME si can do partial blit */
goto fallback;
}
/* the x tests here are currently useless (because we don't support partial blit)
* but keep them around so we don't forget about those
*/
if ((src_pitch % 8) || (src_box->x % 8) || (dst_x % 8) ||
(src_box->y % 8) || (dst_y % 8) || (src_box->height % 8)) {
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 6da1d73d26d..823509524d4 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -243,21 +243,21 @@ struct r600_fmask_info {
struct r600_cmask_info {
uint64_t offset;
uint64_t size;
unsigned alignment;
unsigned slice_tile_max;
uint64_t base_address_reg;
};
struct r600_texture {
- struct r600_resource resource;
+ struct r600_resource buffer;
struct radeon_surf surface;
uint64_t size;
struct r600_texture *flushed_depth_texture;
/* Colorbuffer compression and fast clear. */
struct r600_fmask_info fmask;
struct r600_cmask_info cmask;
struct r600_resource *cmask_buffer;
uint64_t dcc_offset; /* 0 = disabled */
@@ -1289,21 +1289,21 @@ static inline struct r600_resource *r600_resource(struct pipe_resource *r)
static inline void
r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
{
pipe_resource_reference((struct pipe_resource **)ptr,
(struct pipe_resource *)res);
}
static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
- pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
+ pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
}
static inline bool
vi_dcc_enabled(struct r600_texture *tex, unsigned level)
{
return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}
static inline unsigned
si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 0f3d945dff4..09050d495eb 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -1886,21 +1886,21 @@ static unsigned si_tex_compare(unsigned compare)
case PIPE_FUNC_GEQUAL:
return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
case PIPE_FUNC_ALWAYS:
return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
}
}
static unsigned si_tex_dim(struct si_screen *sscreen, struct r600_texture *rtex,
unsigned view_target, unsigned nr_samples)
{
- unsigned res_target = rtex->resource.b.b.target;
+ unsigned res_target = rtex->buffer.b.b.target;
if (view_target == PIPE_TEXTURE_CUBE ||
view_target == PIPE_TEXTURE_CUBE_ARRAY)
res_target = view_target;
/* If interpreting cubemaps as something else, set 2D_ARRAY. */
else if (res_target == PIPE_TEXTURE_CUBE ||
res_target == PIPE_TEXTURE_CUBE_ARRAY)
res_target = PIPE_TEXTURE_2D_ARRAY;
/* GFX9 allocates 1D textures as 2D. */
@@ -2401,22 +2401,22 @@ static void si_initialize_color_surface(struct si_context *sctx,
ntype != V_028C70_NUMBER_SRGB &&
format != V_028C70_COLOR_8_24 &&
format != V_028C70_COLOR_24_8) |
S_028C70_NUMBER_TYPE(ntype) |
S_028C70_ENDIAN(endian);
/* Intensity is implemented as Red, so treat it that way. */
color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == PIPE_SWIZZLE_1 ||
util_format_is_intensity(surf->base.format));
- if (rtex->resource.b.b.nr_samples > 1) {
- unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
+ if (rtex->buffer.b.b.nr_samples > 1) {
+ unsigned log_samples = util_logbase2(rtex->buffer.b.b.nr_samples);
color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
S_028C74_NUM_FRAGMENTS(log_samples);
if (rtex->fmask.size) {
color_info |= S_028C70_COMPRESSION(1);
unsigned fmask_bankh = util_logbase2(rtex->fmask.bank_height);
if (sctx->chip_class == SI) {
/* due to a hw bug, FMASK_BANK_HEIGHT must be set on SI too */
@@ -2429,21 +2429,21 @@ static void si_initialize_color_surface(struct si_context *sctx,
unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
unsigned min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_32B;
/* amdvlk: [min-compressed-block-size] should be set to 32 for dGPU and
64 for APU because all of our APUs to date use DIMMs which have
a request granularity size of 64B while all other chips have a
32B request size */
if (!sctx->screen->info.has_dedicated_vram)
min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
- if (rtex->resource.b.b.nr_samples > 1) {
+ if (rtex->buffer.b.b.nr_samples > 1) {
if (rtex->surface.bpe == 1)
max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
else if (rtex->surface.bpe == 2)
max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
}
surf->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
S_028C78_INDEPENDENT_64B_BLOCKS(1);
}
@@ -2451,28 +2451,28 @@ static void si_initialize_color_surface(struct si_context *sctx,
/* This must be set for fast clear to work without FMASK. */
if (!rtex->fmask.size && sctx->chip_class == SI) {
unsigned bankh = util_logbase2(rtex->surface.u.legacy.bankh);
color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
}
unsigned color_view = S_028C6C_SLICE_START(surf->base.u.tex.first_layer) |
S_028C6C_SLICE_MAX(surf->base.u.tex.last_layer);
if (sctx->chip_class >= GFX9) {
- unsigned mip0_depth = util_max_layer(&rtex->resource.b.b, 0);
+ unsigned mip0_depth = util_max_layer(&rtex->buffer.b.b, 0);
color_view |= S_028C6C_MIP_LEVEL(surf->base.u.tex.level);
color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
S_028C74_RESOURCE_TYPE(rtex->surface.u.gfx9.resource_type);
surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(surf->width0 - 1) |
S_028C68_MIP0_HEIGHT(surf->height0 - 1) |
- S_028C68_MAX_MIP(rtex->resource.b.b.last_level);
+ S_028C68_MAX_MIP(rtex->buffer.b.b.last_level);
}
surf->cb_color_view = color_view;
surf->cb_color_info = color_info;
surf->cb_color_attrib = color_attrib;
/* Determine pixel shader export format */
si_choose_spi_color_formats(surf, format, swap, ntype, rtex->is_depth);
surf->color_initialized = true;
@@ -2485,89 +2485,89 @@ static void si_init_depth_surface(struct si_context *sctx,
unsigned level = surf->base.u.tex.level;
unsigned format, stencil_format;
uint32_t z_info, s_info;
format = si_translate_dbformat(rtex->db_render_format);
stencil_format = rtex->surface.has_stencil ?
V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
assert(format != V_028040_Z_INVALID);
if (format == V_028040_Z_INVALID)
- PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->resource.b.b.format);
+ PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->buffer.b.b.format);
surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
surf->db_htile_data_base = 0;
surf->db_htile_surface = 0;
if (sctx->chip_class >= GFX9) {
assert(rtex->surface.u.gfx9.surf_offset == 0);
- surf->db_depth_base = rtex->resource.gpu_address >> 8;
- surf->db_stencil_base = (rtex->resource.gpu_address +
+ surf->db_depth_base = rtex->buffer.gpu_address >> 8;
+ surf->db_stencil_base = (rtex->buffer.gpu_address +
rtex->surface.u.gfx9.stencil_offset) >> 8;
z_info = S_028038_FORMAT(format) |
- S_028038_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples)) |
+ S_028038_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples)) |
S_028038_SW_MODE(rtex->surface.u.gfx9.surf.swizzle_mode) |
- S_028038_MAXMIP(rtex->resource.b.b.last_level);
+ S_028038_MAXMIP(rtex->buffer.b.b.last_level);
s_info = S_02803C_FORMAT(stencil_format) |
S_02803C_SW_MODE(rtex->surface.u.gfx9.stencil.swizzle_mode);
surf->db_z_info2 = S_028068_EPITCH(rtex->surface.u.gfx9.surf.epitch);
surf->db_stencil_info2 = S_02806C_EPITCH(rtex->surface.u.gfx9.stencil.epitch);
surf->db_depth_view |= S_028008_MIPID(level);
- surf->db_depth_size = S_02801C_X_MAX(rtex->resource.b.b.width0 - 1) |
- S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
+ surf->db_depth_size = S_02801C_X_MAX(rtex->buffer.b.b.width0 - 1) |
+ S_02801C_Y_MAX(rtex->buffer.b.b.height0 - 1);
if (si_htile_enabled(rtex, level)) {
z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
S_028038_ALLOW_EXPCLEAR(1);
if (rtex->tc_compatible_htile) {
unsigned max_zplanes = 4;
if (rtex->db_render_format == PIPE_FORMAT_Z16_UNORM &&
- rtex->resource.b.b.nr_samples > 1)
+ rtex->buffer.b.b.nr_samples > 1)
max_zplanes = 2;
z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
S_028038_ITERATE_FLUSH(1);
s_info |= S_02803C_ITERATE_FLUSH(1);
}
if (rtex->surface.has_stencil) {
/* Stencil buffer workaround ported from the SI-CI-VI code.
* See that for explanation.
*/
- s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->resource.b.b.nr_samples <= 1);
+ s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->buffer.b.b.nr_samples <= 1);
} else {
/* Use all HTILE for depth if there's no stencil. */
s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->resource.gpu_address +
+ surf->db_htile_data_base = (rtex->buffer.gpu_address +
rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
S_028ABC_RB_ALIGNED(rtex->surface.u.gfx9.htile.rb_aligned);
}
} else {
/* SI-CI-VI */
struct legacy_surf_level *levelinfo = &rtex->surface.u.legacy.level[level];
assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);
- surf->db_depth_base = (rtex->resource.gpu_address +
+ surf->db_depth_base = (rtex->buffer.gpu_address +
rtex->surface.u.legacy.level[level].offset) >> 8;
- surf->db_stencil_base = (rtex->resource.gpu_address +
+ surf->db_stencil_base = (rtex->buffer.gpu_address +
rtex->surface.u.legacy.stencil_level[level].offset) >> 8;
z_info = S_028040_FORMAT(format) |
- S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
+ S_028040_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples));
s_info = S_028044_FORMAT(stencil_format);
surf->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!rtex->tc_compatible_htile);
if (sctx->chip_class >= CIK) {
struct radeon_info *info = &sctx->screen->info;
unsigned index = rtex->surface.u.legacy.tiling_index[level];
unsigned stencil_index = rtex->surface.u.legacy.stencil_tiling_index[level];
unsigned macro_index = rtex->surface.u.legacy.macro_tile_index;
unsigned tile_mode = info->si_tile_mode_array[index];
unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
@@ -2603,40 +2603,40 @@ static void si_init_depth_surface(struct si_context *sctx,
* combination of MSAA, fast stencil clear and stencil
* decompress messes with subsequent stencil buffer
* uses. Problem was reproduced on Verde, Bonaire,
* Tonga, and Carrizo.
*
* Disabling EXPCLEAR works around the problem.
*
* Check piglit's arb_texture_multisample-stencil-clear
* test if you want to try changing this.
*/
- if (rtex->resource.b.b.nr_samples <= 1)
+ if (rtex->buffer.b.b.nr_samples <= 1)
s_info |= S_028044_ALLOW_EXPCLEAR(1);
} else if (!rtex->tc_compatible_htile) {
/* Use all of the htile_buffer for depth if there's no stencil.
* This must not be set when TC-compatible HTILE is enabled
* due to a hw bug.
*/
s_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->resource.gpu_address +
+ surf->db_htile_data_base = (rtex->buffer.gpu_address +
rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
if (rtex->tc_compatible_htile) {
surf->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
- if (rtex->resource.b.b.nr_samples <= 1)
+ if (rtex->buffer.b.b.nr_samples <= 1)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
- else if (rtex->resource.b.b.nr_samples <= 4)
+ else if (rtex->buffer.b.b.nr_samples <= 4)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
else
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
}
}
}
surf->db_z_info = z_info;
surf->db_stencil_info = s_info;
@@ -2952,64 +2952,64 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb = (struct r600_surface*)state->cbufs[i];
if (!cb) {
radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
S_028C70_FORMAT(V_028C70_COLOR_INVALID));
continue;
}
tex = (struct r600_texture *)cb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &tex->resource, RADEON_USAGE_READWRITE,
- tex->resource.b.b.nr_samples > 1 ?
+ &tex->buffer, RADEON_USAGE_READWRITE,
+ tex->buffer.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
- if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
+ if (tex->cmask_buffer && tex->cmask_buffer != &tex->buffer) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
}
if (tex->dcc_separate_buffer)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
tex->dcc_separate_buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_DCC);
/* Compute mutable surface parameters. */
- cb_color_base = tex->resource.gpu_address >> 8;
+ cb_color_base = tex->buffer.gpu_address >> 8;
cb_color_fmask = 0;
cb_color_cmask = tex->cmask.base_address_reg;
cb_dcc_base = 0;
cb_color_info = cb->cb_color_info | tex->cb_color_info;
cb_color_attrib = cb->cb_color_attrib;
if (cb->base.u.tex.level > 0)
cb_color_info &= C_028C70_FAST_CLEAR;
if (tex->fmask.size) {
- cb_color_fmask = (tex->resource.gpu_address + tex->fmask.offset) >> 8;
+ cb_color_fmask = (tex->buffer.gpu_address + tex->fmask.offset) >> 8;
cb_color_fmask |= tex->fmask.tile_swizzle;
}
/* Set up DCC. */
if (vi_dcc_enabled(tex, cb->base.u.tex.level)) {
bool is_msaa_resolve_dst = state->cbufs[0] &&
state->cbufs[0]->texture->nr_samples > 1 &&
state->cbufs[1] == &cb->base &&
state->cbufs[1]->texture->nr_samples <= 1;
if (!is_msaa_resolve_dst)
cb_color_info |= S_028C70_DCC_ENABLE(1);
- cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
tex->dcc_offset) >> 8;
cb_dcc_base |= tex->surface.tile_swizzle;
}
if (sctx->chip_class >= GFX9) {
struct gfx9_surf_meta_flags meta;
if (tex->dcc_offset)
meta = tex->surface.u.gfx9.dcc;
else
@@ -3110,21 +3110,21 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
for (; i < 8 ; i++)
if (sctx->framebuffer.dirty_cbufs & (1 << i))
radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
/* ZS buffer. */
if (state->zsbuf && sctx->framebuffer.dirty_zsbuf) {
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &rtex->resource, RADEON_USAGE_READWRITE,
+ &rtex->buffer, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
if (sctx->chip_class >= GFX9) {
radeon_set_context_reg_seq(cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cs, zb->db_htile_data_base); /* DB_HTILE_DATA_BASE */
radeon_emit(cs, S_028018_BASE_HI(zb->db_htile_data_base >> 32)); /* DB_HTILE_DATA_BASE_HI */
radeon_emit(cs, zb->db_depth_size); /* DB_DEPTH_SIZE */
@@ -3528,21 +3528,21 @@ si_make_texture_descriptor(struct si_screen *screen,
bool sampler,
enum pipe_texture_target target,
enum pipe_format pipe_format,
const unsigned char state_swizzle[4],
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer,
unsigned width, unsigned height, unsigned depth,
uint32_t *state,
uint32_t *fmask_state)
{
- struct pipe_resource *res = &tex->resource.b.b;
+ struct pipe_resource *res = &tex->buffer.b.b;
const struct util_format_description *desc;
unsigned char swizzle[4];
int first_non_void;
unsigned num_format, data_format, type;
uint64_t va;
desc = util_format_description(pipe_format);
if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) {
const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
@@ -3707,21 +3707,21 @@ si_make_texture_descriptor(struct si_screen *screen,
* The hw doesn't need to know the total number of layers.
*/
if (type == V_008F1C_SQ_RSRC_IMG_3D)
state[4] |= S_008F20_DEPTH(depth - 1);
else
state[4] |= S_008F20_DEPTH(last_layer);
state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
state[5] |= S_008F24_MAX_MIP(res->nr_samples > 1 ?
util_logbase2(res->nr_samples) :
- tex->resource.b.b.last_level);
+ tex->buffer.b.b.last_level);
} else {
state[3] |= S_008F1C_POW2_PAD(res->last_level > 0);
state[4] |= S_008F20_DEPTH(depth - 1);
state[5] |= S_008F24_LAST_ARRAY(last_layer);
}
if (tex->dcc_offset) {
state[6] = S_008F28_ALPHA_IS_ON_MSB(vi_alpha_is_on_msb(pipe_format));
} else {
/* The last dword is unused by hw. The shader uses it to clear
@@ -3732,21 +3732,21 @@ si_make_texture_descriptor(struct si_screen *screen,
state[7] = C_008F30_MAX_ANISO_RATIO;
else
state[7] = 0xffffffff;
}
}
/* Initialize the sampler view for FMASK. */
if (tex->fmask.size) {
uint32_t data_format, num_format;
- va = tex->resource.gpu_address + tex->fmask.offset;
+ va = tex->buffer.gpu_address + tex->fmask.offset;
if (screen->info.chip_class >= GFX9) {
data_format = V_008F14_IMG_DATA_FORMAT_FMASK;
switch (res->nr_samples) {
case 2:
num_format = V_008F14_IMG_FMASK_8_2_2;
break;
case 4:
num_format = V_008F14_IMG_FMASK_8_4_4;
break;
@@ -3900,22 +3900,22 @@ si_create_sampler_view_custom(struct pipe_context *ctx,
pipe_resource_reference(&view->base.texture, NULL);
FREE(view);
return NULL;
}
assert(tmp->flushed_depth_texture);
/* Override format for the case where the flushed texture
* contains only Z or only S.
*/
- if (tmp->flushed_depth_texture->resource.b.b.format != tmp->resource.b.b.format)
- pipe_format = tmp->flushed_depth_texture->resource.b.b.format;
+ if (tmp->flushed_depth_texture->buffer.b.b.format != tmp->buffer.b.b.format)
+ pipe_format = tmp->flushed_depth_texture->buffer.b.b.format;
tmp = tmp->flushed_depth_texture;
}
surflevel = tmp->surface.u.legacy.level;
if (tmp->db_compatible) {
if (!view->is_stencil_sampler)
pipe_format = tmp->db_render_format;
diff --git a/src/gallium/drivers/radeonsi/si_texture.c b/src/gallium/drivers/radeonsi/si_texture.c
index 7ab6699d96a..b994a8bb503 100644
--- a/src/gallium/drivers/radeonsi/si_texture.c
+++ b/src/gallium/drivers/radeonsi/si_texture.c
@@ -51,22 +51,22 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
unsigned src_level,
const struct pipe_box *src_box)
{
if (!sctx->dma_cs)
return false;
if (rdst->surface.bpe != rsrc->surface.bpe)
return false;
/* MSAA: Blits don't exist in the real world. */
- if (rsrc->resource.b.b.nr_samples > 1 ||
- rdst->resource.b.b.nr_samples > 1)
+ if (rsrc->buffer.b.b.nr_samples > 1 ||
+ rdst->buffer.b.b.nr_samples > 1)
return false;
/* Depth-stencil surfaces:
* When dst is linear, the DB->CB copy preserves HTILE.
* When dst is tiled, the 3D path must be used to update HTILE.
*/
if (rsrc->is_depth || rdst->is_depth)
return false;
/* DCC as:
@@ -78,31 +78,31 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
return false;
/* CMASK as:
* src: Both texture and SDMA paths need decompression. Use SDMA.
* dst: If overwriting the whole texture, discard CMASK and use
* SDMA. Otherwise, use the 3D path.
*/
if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
/* The CMASK clear is only enabled for the first level. */
assert(dst_level == 0);
- if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
+ if (!util_texrange_covers_whole_level(&rdst->buffer.b.b, dst_level,
dstx, dsty, dstz, src_box->width,
src_box->height, src_box->depth))
return false;
si_texture_discard_cmask(sctx->screen, rdst);
}
/* All requirements are met. Prepare textures for SDMA. */
if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
- sctx->b.flush_resource(&sctx->b, &rsrc->resource.b.b);
+ sctx->b.flush_resource(&sctx->b, &rsrc->buffer.b.b);
assert(!(rsrc->dirty_level_mask & (1 << src_level)));
assert(!(rdst->dirty_level_mask & (1 << dst_level)));
return true;
}
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void si_copy_region_with_blit(struct pipe_context *pipe,
struct pipe_resource *dst,
@@ -394,59 +394,59 @@ static void si_surface_import_metadata(struct si_screen *sscreen,
void si_eliminate_fast_color_clear(struct si_context *sctx,
struct r600_texture *rtex)
{
struct si_screen *sscreen = sctx->screen;
struct pipe_context *ctx = &sctx->b;
if (ctx == sscreen->aux_context)
mtx_lock(&sscreen->aux_context_lock);
unsigned n = sctx->num_decompress_calls;
- ctx->flush_resource(ctx, &rtex->resource.b.b);
+ ctx->flush_resource(ctx, &rtex->buffer.b.b);
/* Flush only if any fast clear elimination took place. */
if (n != sctx->num_decompress_calls)
ctx->flush(ctx, NULL, 0);
if (ctx == sscreen->aux_context)
mtx_unlock(&sscreen->aux_context_lock);
}
void si_texture_discard_cmask(struct si_screen *sscreen,
struct r600_texture *rtex)
{
if (!rtex->cmask.size)
return;
- assert(rtex->resource.b.b.nr_samples <= 1);
+ assert(rtex->buffer.b.b.nr_samples <= 1);
/* Disable CMASK. */
memset(&rtex->cmask, 0, sizeof(rtex->cmask));
- rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
+ rtex->cmask.base_address_reg = rtex->buffer.gpu_address >> 8;
rtex->dirty_level_mask = 0;
rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
- if (rtex->cmask_buffer != &rtex->resource)
+ if (rtex->cmask_buffer != &rtex->buffer)
r600_resource_reference(&rtex->cmask_buffer, NULL);
/* Notify all contexts about the change. */
p_atomic_inc(&sscreen->dirty_tex_counter);
p_atomic_inc(&sscreen->compressed_colortex_counter);
}
static bool si_can_disable_dcc(struct r600_texture *rtex)
{
/* We can't disable DCC if it can be written by another process. */
return rtex->dcc_offset &&
- (!rtex->resource.b.is_shared ||
- !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
+ (!rtex->buffer.b.is_shared ||
+ !(rtex->buffer.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
static bool si_texture_discard_dcc(struct si_screen *sscreen,
struct r600_texture *rtex)
{
if (!si_can_disable_dcc(rtex))
return false;
assert(rtex->dcc_separate_buffer == NULL);
@@ -500,26 +500,26 @@ bool si_texture_disable_dcc(struct si_context *sctx,
return si_texture_discard_dcc(sscreen, rtex);
}
static void si_reallocate_texture_inplace(struct si_context *sctx,
struct r600_texture *rtex,
unsigned new_bind_flag,
bool invalidate_storage)
{
struct pipe_screen *screen = sctx->b.screen;
struct r600_texture *new_tex;
- struct pipe_resource templ = rtex->resource.b.b;
+ struct pipe_resource templ = rtex->buffer.b.b;
unsigned i;
templ.bind |= new_bind_flag;
- if (rtex->resource.b.is_shared)
+ if (rtex->buffer.b.is_shared)
return;
if (new_bind_flag == PIPE_BIND_LINEAR) {
if (rtex->surface.is_linear)
return;
/* This fails with MSAA, depth, and compressed textures. */
if (si_choose_tiling(sctx->screen, &templ, false) !=
RADEON_SURF_MODE_LINEAR_ALIGNED)
return;
@@ -531,40 +531,40 @@ static void si_reallocate_texture_inplace(struct si_context *sctx,
/* Copy the pixels to the new texture. */
if (!invalidate_storage) {
for (i = 0; i <= templ.last_level; i++) {
struct pipe_box box;
u_box_3d(0, 0, 0,
u_minify(templ.width0, i), u_minify(templ.height0, i),
util_num_layers(&templ, i), &box);
- sctx->dma_copy(&sctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
- &rtex->resource.b.b, i, &box);
+ sctx->dma_copy(&sctx->b, &new_tex->buffer.b.b, i, 0, 0, 0,
+ &rtex->buffer.b.b, i, &box);
}
}
if (new_bind_flag == PIPE_BIND_LINEAR) {
si_texture_discard_cmask(sctx->screen, rtex);
si_texture_discard_dcc(sctx->screen, rtex);
}
/* Replace the structure fields of rtex. */
- rtex->resource.b.b.bind = templ.bind;
- pb_reference(&rtex->resource.buf, new_tex->resource.buf);
- rtex->resource.gpu_address = new_tex->resource.gpu_address;
- rtex->resource.vram_usage = new_tex->resource.vram_usage;
- rtex->resource.gart_usage = new_tex->resource.gart_usage;
- rtex->resource.bo_size = new_tex->resource.bo_size;
- rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
- rtex->resource.domains = new_tex->resource.domains;
- rtex->resource.flags = new_tex->resource.flags;
+ rtex->buffer.b.b.bind = templ.bind;
+ pb_reference(&rtex->buffer.buf, new_tex->buffer.buf);
+ rtex->buffer.gpu_address = new_tex->buffer.gpu_address;
+ rtex->buffer.vram_usage = new_tex->buffer.vram_usage;
+ rtex->buffer.gart_usage = new_tex->buffer.gart_usage;
+ rtex->buffer.bo_size = new_tex->buffer.bo_size;
+ rtex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
+ rtex->buffer.domains = new_tex->buffer.domains;
+ rtex->buffer.flags = new_tex->buffer.flags;
rtex->size = new_tex->size;
rtex->db_render_format = new_tex->db_render_format;
rtex->db_compatible = new_tex->db_compatible;
rtex->can_sample_z = new_tex->can_sample_z;
rtex->can_sample_s = new_tex->can_sample_s;
rtex->surface = new_tex->surface;
rtex->fmask = new_tex->fmask;
rtex->cmask = new_tex->cmask;
rtex->cb_color_info = new_tex->cb_color_info;
rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
@@ -590,21 +590,21 @@ static void si_reallocate_texture_inplace(struct si_context *sctx,
static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
{
return (ATI_VENDOR_ID << 16) | sscreen->info.pci_id;
}
static void si_query_opaque_metadata(struct si_screen *sscreen,
struct r600_texture *rtex,
struct radeon_bo_metadata *md)
{
- struct pipe_resource *res = &rtex->resource.b.b;
+ struct pipe_resource *res = &rtex->buffer.b.b;
static const unsigned char swizzle[] = {
PIPE_SWIZZLE_X,
PIPE_SWIZZLE_Y,
PIPE_SWIZZLE_Z,
PIPE_SWIZZLE_W
};
uint32_t desc[8], i;
bool is_array = util_texture_is_array(res->target);
/* DRM 2.x.x doesn't support this. */
@@ -704,21 +704,21 @@ static boolean si_texture_get_handle(struct pipe_screen* screen,
if (resource->target != PIPE_BUFFER) {
/* This is not supported now, but it might be required for OpenCL
* interop in the future.
*/
if (resource->nr_samples > 1 || rtex->is_depth)
return false;
/* Move a suballocated texture into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
rtex->surface.tile_swizzle ||
- (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers &&
whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_SHARED, false);
flush = true;
assert(res->b.b.bind & PIPE_BIND_SHARED);
assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
assert(rtex->surface.tile_swizzle == 0);
@@ -767,21 +767,21 @@ static boolean si_texture_get_handle(struct pipe_screen* screen,
offset = rtex->surface.u.legacy.level[0].offset;
stride = rtex->surface.u.legacy.level[0].nblk_x *
rtex->surface.bpe;
slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
}
} else {
/* Buffer exports are for the OpenCL interop. */
/* Move a suballocated buffer into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
/* A DMABUF export always fails if the BO is local. */
- (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers)) {
assert(!res->b.is_shared);
/* Allocate a new buffer with PIPE_BIND_SHARED. */
struct pipe_resource templ = res->b.b;
templ.bind |= PIPE_BIND_SHARED;
struct pipe_resource *newb =
screen->resource_create(screen, &templ);
if (!newb)
@@ -823,43 +823,43 @@ static boolean si_texture_get_handle(struct pipe_screen* screen,
}
return sscreen->ws->buffer_get_handle(res->buf, stride, offset,
slice_size, whandle);
}
static void si_texture_destroy(struct pipe_screen *screen,
struct pipe_resource *ptex)
{
struct r600_texture *rtex = (struct r600_texture*)ptex;
- struct r600_resource *resource = &rtex->resource;
+ struct r600_resource *resource = &rtex->buffer;
r600_texture_reference(&rtex->flushed_depth_texture, NULL);
- if (rtex->cmask_buffer != &rtex->resource) {
+ if (rtex->cmask_buffer != &rtex->buffer) {
r600_resource_reference(&rtex->cmask_buffer, NULL);
}
pb_reference(&resource->buf, NULL);
r600_resource_reference(&rtex->dcc_separate_buffer, NULL);
r600_resource_reference(&rtex->last_dcc_separate_buffer, NULL);
FREE(rtex);
}
static const struct u_resource_vtbl si_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void si_texture_get_fmask_info(struct si_screen *sscreen,
struct r600_texture *rtex,
unsigned nr_samples,
struct r600_fmask_info *out)
{
/* FMASK is allocated like an ordinary texture. */
- struct pipe_resource templ = rtex->resource.b.b;
+ struct pipe_resource templ = rtex->buffer.b.b;
struct radeon_surf fmask = {};
unsigned flags, bpe;
memset(out, 0, sizeof(*out));
if (sscreen->info.chip_class >= GFX9) {
out->alignment = rtex->surface.u.gfx9.fmask_alignment;
out->size = rtex->surface.u.gfx9.fmask_size;
out->tile_swizzle = rtex->surface.u.gfx9.fmask_tile_swizzle;
return;
@@ -898,21 +898,21 @@ void si_texture_get_fmask_info(struct si_screen *sscreen,
out->bank_height = fmask.u.legacy.bankh;
out->tile_swizzle = fmask.tile_swizzle;
out->alignment = MAX2(256, fmask.surf_alignment);
out->size = fmask.surf_size;
}
static void si_texture_allocate_fmask(struct si_screen *sscreen,
struct r600_texture *rtex)
{
si_texture_get_fmask_info(sscreen, rtex,
- rtex->resource.b.b.nr_samples, &rtex->fmask);
+ rtex->buffer.b.b.nr_samples, &rtex->fmask);
rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void si_texture_get_cmask_info(struct si_screen *sscreen,
struct r600_texture *rtex,
struct r600_cmask_info *out)
{
unsigned pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
@@ -942,33 +942,33 @@ void si_texture_get_cmask_info(struct si_screen *sscreen,
cl_width = 64;
cl_height = 64;
break;
default:
assert(0);
return;
}
unsigned base_align = num_pipes * pipe_interleave_bytes;
- unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
- unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
+ unsigned width = align(rtex->buffer.b.b.width0, cl_width*8);
+ unsigned height = align(rtex->buffer.b.b.height0, cl_height*8);
unsigned slice_elements = (width * height) / (8*8);
/* Each element of CMASK is a nibble. */
unsigned slice_bytes = slice_elements / 2;
out->slice_tile_max = (width * height) / (128*128);
if (out->slice_tile_max)
out->slice_tile_max -= 1;
out->alignment = MAX2(256, base_align);
- out->size = util_num_layers(&rtex->resource.b.b, 0) *
+ out->size = util_num_layers(&rtex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
static void si_texture_allocate_cmask(struct si_screen *sscreen,
struct r600_texture *rtex)
{
si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
rtex->size = rtex->cmask.offset + rtex->cmask.size;
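(To make the CMASK sizing above concrete, here is a small worked example with illustrative numbers: 8 pipes, a 256-byte pipe interleave, 64x64 cacheline tiles, and an 800x600 single-layer target. It mirrors the computation above, one CMASK nibble per 8x8 pixel block; none of the inputs are taken from real hardware tables.)

  #include <stdio.h>

  static unsigned align_up(unsigned v, unsigned a)
  {
          return (v + a - 1) / a * a;
  }

  int main(void)
  {
          unsigned cl_width = 64, cl_height = 64;
          unsigned num_pipes = 8, pipe_interleave_bytes = 256;
          unsigned base_align = num_pipes * pipe_interleave_bytes; /* 2048 */
          unsigned width  = align_up(800, cl_width * 8);           /* 1024 */
          unsigned height = align_up(600, cl_height * 8);          /* 1024 */
          unsigned slice_elements = (width * height) / (8 * 8);    /* 16384 */
          unsigned slice_bytes = slice_elements / 2; /* one nibble each: 8192 */
          printf("CMASK slice: %u bytes, aligned size: %u\n",
                 slice_bytes, align_up(slice_bytes, base_align));
          return 0;
  }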
@@ -1022,32 +1022,32 @@ static void si_texture_get_htile_size(struct si_screen *sscreen,
break;
case 16:
cl_width = 128;
cl_height = 64;
break;
default:
assert(0);
return;
}
- width = align(rtex->resource.b.b.width0, cl_width * 8);
- height = align(rtex->resource.b.b.height0, cl_height * 8);
+ width = align(rtex->buffer.b.b.width0, cl_width * 8);
+ height = align(rtex->buffer.b.b.height0, cl_height * 8);
slice_elements = (width * height) / (8 * 8);
slice_bytes = slice_elements * 4;
pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
base_align = num_pipes * pipe_interleave_bytes;
rtex->surface.htile_alignment = base_align;
rtex->surface.htile_size =
- util_num_layers(&rtex->resource.b.b, 0) *
+ util_num_layers(&rtex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
static void si_texture_allocate_htile(struct si_screen *sscreen,
struct r600_texture *rtex)
{
if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
si_texture_get_htile_size(sscreen, rtex);
if (!rtex->surface.htile_size)
@@ -1059,26 +1059,26 @@ static void si_texture_allocate_htile(struct si_screen *sscreen,
void si_print_texture_info(struct si_screen *sscreen,
struct r600_texture *rtex, struct u_log_context *log)
{
int i;
/* Common parameters. */
u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
"blk_h=%u, array_size=%u, last_level=%u, "
"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
- rtex->resource.b.b.width0, rtex->resource.b.b.height0,
- rtex->resource.b.b.depth0, rtex->surface.blk_w,
+ rtex->buffer.b.b.width0, rtex->buffer.b.b.height0,
+ rtex->buffer.b.b.depth0, rtex->surface.blk_w,
rtex->surface.blk_h,
- rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
- rtex->surface.bpe, rtex->resource.b.b.nr_samples,
- rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));
+ rtex->buffer.b.b.array_size, rtex->buffer.b.b.last_level,
+ rtex->surface.bpe, rtex->buffer.b.b.nr_samples,
+ rtex->surface.flags, util_format_short_name(rtex->buffer.b.b.format));
if (sscreen->info.chip_class >= GFX9) {
u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
"alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
rtex->surface.surf_size,
rtex->surface.u.gfx9.surf_slice_size,
rtex->surface.surf_alignment,
rtex->surface.u.gfx9.surf.swizzle_mode,
rtex->surface.u.gfx9.surf.epitch,
rtex->surface.u.gfx9.surf_pitch);
@@ -1155,55 +1155,55 @@ void si_print_texture_info(struct si_screen *sscreen,
u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, "
"alignment=%u, TC_compatible = %u\n",
rtex->htile_offset, rtex->surface.htile_size,
rtex->surface.htile_alignment,
rtex->tc_compatible_htile);
if (rtex->dcc_offset) {
u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
rtex->dcc_offset, rtex->surface.dcc_size,
rtex->surface.dcc_alignment);
- for (i = 0; i <= rtex->resource.b.b.last_level; i++)
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
u_log_printf(log, " DCCLevel[%i]: enabled=%u, offset=%u, "
"fast_clear_size=%u\n",
i, i < rtex->surface.num_dcc_levels,
rtex->surface.u.legacy.level[i].dcc_offset,
rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
}
- for (i = 0; i <= rtex->resource.b.b.last_level; i++)
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
i, rtex->surface.u.legacy.level[i].offset,
(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
- u_minify(rtex->resource.b.b.width0, i),
- u_minify(rtex->resource.b.b.height0, i),
- u_minify(rtex->resource.b.b.depth0, i),
+ u_minify(rtex->buffer.b.b.width0, i),
+ u_minify(rtex->buffer.b.b.height0, i),
+ u_minify(rtex->buffer.b.b.depth0, i),
rtex->surface.u.legacy.level[i].nblk_x,
rtex->surface.u.legacy.level[i].nblk_y,
rtex->surface.u.legacy.level[i].mode,
rtex->surface.u.legacy.tiling_index[i]);
if (rtex->surface.has_stencil) {
u_log_printf(log, " StencilLayout: tilesplit=%u\n",
rtex->surface.u.legacy.stencil_tile_split);
- for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++) {
u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
"slice_size=%"PRIu64", npix_x=%u, "
"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
i, rtex->surface.u.legacy.stencil_level[i].offset,
(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
- u_minify(rtex->resource.b.b.width0, i),
- u_minify(rtex->resource.b.b.height0, i),
- u_minify(rtex->resource.b.b.depth0, i),
+ u_minify(rtex->buffer.b.b.width0, i),
+ u_minify(rtex->buffer.b.b.height0, i),
+ u_minify(rtex->buffer.b.b.depth0, i),
rtex->surface.u.legacy.stencil_level[i].nblk_x,
rtex->surface.u.legacy.stencil_level[i].nblk_y,
rtex->surface.u.legacy.stencil_level[i].mode,
rtex->surface.u.legacy.stencil_tiling_index[i]);
}
}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
@@ -1213,29 +1213,29 @@ si_texture_create_object(struct pipe_screen *screen,
struct radeon_surf *surface)
{
struct r600_texture *rtex;
struct r600_resource *resource;
struct si_screen *sscreen = (struct si_screen*)screen;
rtex = CALLOC_STRUCT(r600_texture);
if (!rtex)
return NULL;
- resource = &rtex->resource;
+ resource = &rtex->buffer;
resource->b.b = *base;
resource->b.b.next = NULL;
resource->b.vtbl = &si_texture_vtbl;
pipe_reference_init(&resource->b.b.reference, 1);
resource->b.b.screen = screen;
/* don't include stencil-only formats which we don't support for rendering */
- rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
+ rtex->is_depth = util_format_has_depth(util_format_description(rtex->buffer.b.b.format));
rtex->surface = *surface;
rtex->size = rtex->surface.surf_size;
rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
(rtex->surface.flags &
RADEON_SURF_TC_COMPATIBLE_HTILE);
/* TC-compatible HTILE:
* - VI only supports Z32_FLOAT.
@@ -1277,21 +1277,21 @@ si_texture_create_object(struct pipe_screen *screen,
if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
si_texture_allocate_htile(sscreen, rtex);
}
} else {
if (base->nr_samples > 1 &&
!buf &&
!(sscreen->debug_flags & DBG(NO_FMASK))) {
si_texture_allocate_fmask(sscreen, rtex);
si_texture_allocate_cmask(sscreen, rtex);
- rtex->cmask_buffer = &rtex->resource;
+ rtex->cmask_buffer = &rtex->buffer;
if (!rtex->fmask.size || !rtex->cmask.size) {
FREE(rtex);
return NULL;
}
}
/* Shared textures must always set up DCC here.
* If it's not present, it will be disabled by
* apply_opaque_metadata later.
@@ -1331,42 +1331,42 @@ si_texture_create_object(struct pipe_screen *screen,
si_screen_clear_buffer(sscreen, &rtex->cmask_buffer->b.b,
rtex->cmask.offset, rtex->cmask.size,
0xCCCCCCCC);
}
if (rtex->htile_offset) {
uint32_t clear_value = 0;
if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
clear_value = 0x0000030F;
- si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
rtex->htile_offset,
rtex->surface.htile_size,
clear_value);
}
/* Initialize DCC only if the texture is not being imported. */
if (!buf && rtex->dcc_offset) {
- si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
rtex->dcc_offset,
rtex->surface.dcc_size,
0xFFFFFFFF);
}
/* Initialize the CMASK base register value. */
rtex->cmask.base_address_reg =
- (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+ (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
if (sscreen->debug_flags & DBG(VM)) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
- rtex->resource.gpu_address,
- rtex->resource.gpu_address + rtex->resource.buf->size,
+ rtex->buffer.gpu_address,
+ rtex->buffer.gpu_address + rtex->buffer.buf->size,
base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
}
if (sscreen->debug_flags & DBG(TEX)) {
puts("Texture:");
struct u_log_context log;
u_log_context_init(&log);
si_print_texture_info(sscreen, rtex, &log);
u_log_new_page_print(&log, stdout);
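(A note on cmask.base_address_reg as computed in this hunk: the value stored is the GPU address with the low 8 bits shifted off, i.e. in 256-byte units, so the address is expected to be 256-byte aligned. A tiny self-contained illustration; the addresses are made up.)

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
          uint64_t gpu_address  = 0x100000000ULL; /* BO virtual address */
          uint64_t cmask_offset = 0x20000;        /* CMASK start in the BO */
          uint64_t addr = gpu_address + cmask_offset;
          assert((addr & 0xff) == 0); /* the >> 8 drops exactly these bits */
          uint32_t reg = (uint32_t)(addr >> 8); /* what base_address_reg holds */
          (void)reg;
          return 0;
  }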
@@ -1510,27 +1510,27 @@ static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
r = si_init_surface(sscreen, &surface, templ, array_mode, stride,
offset, true, is_scanout, false, false);
if (r) {
return NULL;
}
rtex = si_texture_create_object(screen, templ, buf, &surface);
if (!rtex)
return NULL;
- rtex->resource.b.is_shared = true;
- rtex->resource.external_usage = usage;
+ rtex->buffer.b.is_shared = true;
+ rtex->buffer.external_usage = usage;
si_apply_opaque_metadata(sscreen, rtex, &metadata);
assert(rtex->surface.tile_swizzle == 0);
- return &rtex->resource.b.b;
+ return &rtex->buffer.b.b;
}
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
struct pipe_resource *texture,
struct r600_texture **staging)
{
struct r600_texture *rtex = (struct r600_texture*)texture;
struct pipe_resource resource;
struct r600_texture **flushed_depth_texture = staging ?
staging : &rtex->flushed_depth_texture;
@@ -1619,44 +1619,44 @@ static void si_init_temp_resource_from_box(struct pipe_resource *res,
} else {
res->target = PIPE_TEXTURE_2D;
}
}
static bool si_can_invalidate_texture(struct si_screen *sscreen,
struct r600_texture *rtex,
unsigned transfer_usage,
const struct pipe_box *box)
{
- return !rtex->resource.b.is_shared &&
+ return !rtex->buffer.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
- rtex->resource.b.b.last_level == 0 &&
- util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
+ rtex->buffer.b.b.last_level == 0 &&
+ util_texrange_covers_whole_level(&rtex->buffer.b.b, 0,
box->x, box->y, box->z,
box->width, box->height,
box->depth);
}
static void si_texture_invalidate_storage(struct si_context *sctx,
struct r600_texture *rtex)
{
struct si_screen *sscreen = sctx->screen;
/* There is no point in discarding depth and tiled buffers. */
assert(!rtex->is_depth);
assert(rtex->surface.is_linear);
/* Reallocate the buffer in the same pipe_resource. */
- si_alloc_resource(sscreen, &rtex->resource);
+ si_alloc_resource(sscreen, &rtex->buffer);
/* Initialize the CMASK base address (needed even without CMASK). */
rtex->cmask.base_address_reg =
- (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+ (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
p_atomic_inc(&sscreen->dirty_tex_counter);
sctx->num_alloc_tex_transfer_bytes += rtex->size;
}
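(The si_can_invalidate_texture()/si_texture_invalidate_storage() pair above is classic buffer orphaning: when the old storage is busy but nobody can observe its contents, attach fresh storage instead of stalling. A minimal sketch of the idea with simplified stand-in types, not the driver's structures.)

  #include <stdbool.h>
  #include <stdlib.h>

  struct tex {
          unsigned char *storage; /* stand-in for the GPU buffer */
          size_t size;
          bool is_shared;
  };

  /* Swap in fresh storage when the old one is busy but its contents
   * cannot be observed (not shared, whole level about to be written).
   * In the driver the GPU's own reference keeps the old BO alive. */
  static bool try_orphan(struct tex *t, bool write_covers_all, bool busy)
  {
          if (!busy || t->is_shared || !write_covers_all)
                  return false;
          free(t->storage);
          t->storage = malloc(t->size); /* fresh, GPU-idle storage */
          return t->storage != NULL;
  }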
static void *si_texture_transfer_map(struct pipe_context *ctx,
struct pipe_resource *texture,
unsigned level,
unsigned usage,
@@ -1699,26 +1699,26 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
* Reading from VRAM or GTT WC is slow, always use the staging
* texture in this case.
*
* Use the staging texture for uploads if the underlying BO
* is busy.
*/
if (!rtex->surface.is_linear)
use_staging_texture = true;
else if (usage & PIPE_TRANSFER_READ)
use_staging_texture =
- rtex->resource.domains & RADEON_DOMAIN_VRAM ||
- rtex->resource.flags & RADEON_FLAG_GTT_WC;
+ rtex->buffer.domains & RADEON_DOMAIN_VRAM ||
+ rtex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
- else if (si_rings_is_buffer_referenced(sctx, rtex->resource.buf,
+ else if (si_rings_is_buffer_referenced(sctx, rtex->buffer.buf,
RADEON_USAGE_READWRITE) ||
- !sctx->ws->buffer_wait(rtex->resource.buf, 0,
+ !sctx->ws->buffer_wait(rtex->buffer.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
if (si_can_invalidate_texture(sctx->screen, rtex,
usage, box))
si_texture_invalidate_storage(sctx, rtex);
else
use_staging_texture = true;
}
}
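(Condensed, the staging decision above reduces to a small predicate. This is a paraphrase with simplified flags, not the driver's actual helper.)

  #include <stdbool.h>

  enum {
          XFER_READ   = 1 << 0, /* mirrors PIPE_TRANSFER_READ */
          DOM_VRAM    = 1 << 1, /* mirrors RADEON_DOMAIN_VRAM */
          FLAG_GTT_WC = 1 << 2  /* mirrors RADEON_FLAG_GTT_WC */
  };

  static bool use_staging(bool is_linear, unsigned domains, unsigned flags,
                          unsigned usage, bool bo_busy, bool can_invalidate)
  {
          if (!is_linear)
                  return true; /* tiled layouts aren't CPU-addressable */
          if (usage & XFER_READ) /* reads from VRAM/GTT-WC are slow */
                  return (domains & DOM_VRAM) || (flags & FLAG_GTT_WC);
          /* Linear, write-only: stage only if the BO is busy and its
           * storage can't simply be invalidated instead. */
          return bo_busy && !can_invalidate;
  }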
@@ -1726,21 +1726,21 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
if (!trans)
return NULL;
pipe_resource_reference(&trans->b.b.resource, texture);
trans->b.b.level = level;
trans->b.b.usage = usage;
trans->b.b.box = *box;
if (rtex->is_depth) {
struct r600_texture *staging_depth;
- if (rtex->resource.b.b.nr_samples > 1) {
+ if (rtex->buffer.b.b.nr_samples > 1) {
/* MSAA depth buffers need to be converted to single sample buffers.
*
* Mapping MSAA depth buffers can occur if ReadPixels is called
* with a multisample GLX visual.
*
* First downsample the depth buffer to a temporary texture,
* then decompress the temporary one to staging.
*
* Only the region being mapped is transferred.
*/
@@ -1785,57 +1785,57 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
level, level,
box->z, box->z + box->depth - 1,
0, 0);
offset = si_texture_get_offset(sctx->screen, staging_depth,
level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
}
- trans->staging = &staging_depth->resource;
+ trans->staging = &staging_depth->buffer;
buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
struct r600_texture *staging;
si_init_temp_resource_from_box(&resource, texture, box, level,
SI_RESOURCE_FLAG_TRANSFER);
resource.usage = (usage & PIPE_TRANSFER_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
if (!staging) {
PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
FREE(trans);
return NULL;
}
- trans->staging = &staging->resource;
+ trans->staging = &staging->buffer;
/* Just get the strides. */
si_texture_get_offset(sctx->screen, staging, 0, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
si_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
buf = trans->staging;
} else {
/* the resource is mapped directly */
offset = si_texture_get_offset(sctx->screen, rtex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
- buf = &rtex->resource;
+ buf = &rtex->buffer;
}
if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage))) {
r600_resource_reference(&trans->staging, NULL);
FREE(trans);
return NULL;
}
*ptransfer = &trans->b.b;
return map + offset;
@@ -1843,21 +1843,21 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
static void si_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer* transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct pipe_resource *texture = transfer->resource;
struct r600_texture *rtex = (struct r600_texture*)texture;
if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
- if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
+ if (rtex->is_depth && rtex->buffer.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
&rtransfer->staging->b.b, transfer->level,
&transfer->box);
} else {
si_copy_from_staging_texture(ctx, rtransfer);
}
}
if (rtransfer->staging) {
@@ -2141,21 +2141,21 @@ static void vi_dcc_clean_up_context_slot(struct si_context *sctx,
* Return the per-context slot where DCC statistics queries for the texture live.
*/
static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
struct r600_texture *tex)
{
int i, empty_slot = -1;
/* Remove zombie textures (textures kept alive by this array only). */
for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++)
if (sctx->dcc_stats[i].tex &&
- sctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
+ sctx->dcc_stats[i].tex->buffer.b.b.reference.count == 1)
vi_dcc_clean_up_context_slot(sctx, i);
/* Find the texture. */
for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
/* Return if found. */
if (sctx->dcc_stats[i].tex == tex) {
sctx->dcc_stats[i].last_use_timestamp = os_time_get();
return i;
}
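(The zombie sweep above relies on reference counting: a slot whose texture has only one reference left is kept alive solely by this cache and can be reclaimed. A minimal sketch with stand-in types, not the real query/texture structures.)

  #include <stddef.h>

  struct cached_tex {
          int refcount; /* analogue of pipe_reference */
  };

  struct stats_slot {
          struct cached_tex *tex;
  };

  /* A texture whose only remaining reference is ours is a zombie:
   * nothing outside this cache can ever use it again. */
  static void sweep_zombies(struct stats_slot *slots, size_t n)
  {
          for (size_t i = 0; i < n; i++) {
                  if (slots[i].tex && slots[i].tex->refcount == 1) {
                          slots[i].tex->refcount = 0; /* drop our reference */
                          slots[i].tex = NULL;
                  }
          }
  }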
@@ -2236,24 +2236,24 @@ static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
}
/* Called by fast clear. */
void vi_separate_dcc_try_enable(struct si_context *sctx,
struct r600_texture *tex)
{
/* The intent is to use this with shared displayable back buffers,
* but it's not strictly limited only to them.
*/
- if (!tex->resource.b.is_shared ||
- !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
- tex->resource.b.b.target != PIPE_TEXTURE_2D ||
- tex->resource.b.b.last_level > 0 ||
+ if (!tex->buffer.b.is_shared ||
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
+ tex->buffer.b.b.target != PIPE_TEXTURE_2D ||
+ tex->buffer.b.b.last_level > 0 ||
!tex->surface.dcc_size)
return;
if (tex->dcc_offset)
return; /* already enabled */
/* Enable the DCC stat gathering. */
if (!tex->dcc_gather_statistics) {
tex->dcc_gather_statistics = true;
vi_separate_dcc_start_query(sctx, tex);
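(The gating conditions above can be read as a single predicate. A sketch with simplified stand-in fields, not the real r600_texture layout.)

  #include <stdbool.h>

  struct dcc_candidate {
          bool is_shared;
          bool explicit_flush; /* PIPE_HANDLE_USAGE_EXPLICIT_FLUSH set */
          bool is_2d;          /* target == PIPE_TEXTURE_2D */
          unsigned last_level; /* must be 0: no mipmaps */
          unsigned dcc_size;   /* 0 if the surface can't carry DCC */
          unsigned dcc_offset; /* non-zero once DCC is already enabled */
  };

  static bool can_try_separate_dcc(const struct dcc_candidate *t)
  {
          return t->is_shared && t->explicit_flush &&
                 t->is_2d && t->last_level == 0 &&
                 t->dcc_size != 0 && t->dcc_offset == 0;
  }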
@@ -2311,21 +2311,21 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
/* Read the results. */
ctx->get_query_result(ctx, sctx->dcc_stats[i].ps_stats[2],
true, &result);
si_query_hw_reset_buffers(sctx,
(struct si_query_hw*)
sctx->dcc_stats[i].ps_stats[2]);
/* Compute the approximate number of fullscreen draws. */
tex->ps_draw_ratio =
result.pipeline_statistics.ps_invocations /
- (tex->resource.b.b.width0 * tex->resource.b.b.height0);
+ (tex->buffer.b.b.width0 * tex->buffer.b.b.height0);
sctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;
disable = tex->dcc_separate_buffer &&
!vi_should_enable_separate_dcc(tex);
}
tex->num_slow_clears = 0;
/* stop the statistics query for ps_stats[0] */
if (query_active)
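(For scale, a worked example of the ps_draw_ratio estimate above, with made-up numbers: 10,368,000 sampled pixel-shader invocations on a 1920x1080 surface come out to roughly five full-screen draws, which matches the threshold vi_should_enable_separate_dcc() tests.)

  #include <stdio.h>

  int main(void)
  {
          unsigned width = 1920, height = 1080;
          unsigned long long ps_invocations = 10368000ULL; /* illustrative */
          unsigned ratio = (unsigned)
                  (ps_invocations / ((unsigned long long)width * height));
          printf("~%u fullscreen draws\n", ratio); /* prints 5 */
          return 0;
  }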
@@ -2448,26 +2448,26 @@ si_texture_from_memobj(struct pipe_screen *screen,
rtex = si_texture_create_object(screen, templ, memobj->buf, &surface);
if (!rtex)
return NULL;
/* r600_texture_create_object doesn't increment refcount of
* memobj->buf, so increment it here.
*/
pb_reference(&buf, memobj->buf);
- rtex->resource.b.is_shared = true;
- rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
+ rtex->buffer.b.is_shared = true;
+ rtex->buffer.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
si_apply_opaque_metadata(sscreen, rtex, &metadata);
- return &rtex->resource.b.b;
+ return &rtex->buffer.b.b;
}
static bool si_check_resource_capability(struct pipe_screen *screen,
struct pipe_resource *resource,
unsigned bind)
{
struct r600_texture *tex = (struct r600_texture*)resource;
/* Buffers only support the linear flag. */
if (resource->target == PIPE_BUFFER)
diff --git a/src/gallium/drivers/radeonsi/si_uvd.c b/src/gallium/drivers/radeonsi/si_uvd.c
index 4165725b0e9..ee8ed58b401 100644
--- a/src/gallium/drivers/radeonsi/si_uvd.c
+++ b/src/gallium/drivers/radeonsi/si_uvd.c
@@ -77,32 +77,32 @@ struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
if (!resources[i])
goto error;
}
}
for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
if (!resources[i])
continue;
surfaces[i] = & resources[i]->surface;
- pbs[i] = &resources[i]->resource.buf;
+ pbs[i] = &resources[i]->buffer.buf;
}
si_vid_join_surfaces(ctx, pbs, surfaces);
for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
if (!resources[i])
continue;
/* reset the address */
- resources[i]->resource.gpu_address = ctx->ws->buffer_get_virtual_address(
- resources[i]->resource.buf);
+ resources[i]->buffer.gpu_address = ctx->ws->buffer_get_virtual_address(
+ resources[i]->buffer.buf);
}
vidtemplate.height *= array_size;
return vl_video_buffer_create_ex2(pipe, &vidtemplate, (struct pipe_resource **)resources);
error:
for (i = 0; i < VL_NUM_COMPONENTS; ++i)
r600_texture_reference(&resources[i], NULL);
return NULL;
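(Why the loop above re-queries the virtual address: si_vid_join_surfaces() fuses the per-component buffers into one allocation, so any GPU address cached before the join is stale. A toy model of the pattern, with stand-in types rather than the winsys API.)

  #include <stdint.h>
  #include <stddef.h>

  struct bo { uint64_t va; };

  struct plane {
          struct bo *buf;
          uint64_t gpu_address; /* cached; stale after re-allocation */
  };

  static uint64_t buffer_get_virtual_address(const struct bo *b)
  {
          return b->va;
  }

  /* Once the planes point at a shared BO, refresh each cached address
   * from the buffer the plane now references. */
  static void refresh_addresses(struct plane *planes, size_t n)
  {
          for (size_t i = 0; i < n; i++)
                  if (planes[i].buf)
                          planes[i].gpu_address =
                                  buffer_get_virtual_address(planes[i].buf);
  }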
@@ -115,32 +115,32 @@ static struct pb_buffer* si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_bu
struct r600_texture *luma = (struct r600_texture *)buf->resources[0];
struct r600_texture *chroma = (struct r600_texture *)buf->resources[1];
enum ruvd_surface_type type = (sscreen->info.chip_class >= GFX9) ?
RUVD_SURFACE_TYPE_GFX9 :
RUVD_SURFACE_TYPE_LEGACY;
msg->body.decode.dt_field_mode = buf->base.interlaced;
si_uvd_set_dt_surfaces(msg, &luma->surface, (chroma) ? &chroma->surface : NULL, type);
- return luma->resource.buf;
+ return luma->buffer.buf;
}
/* get the radeon resources for VCE */
static void si_vce_get_buffer(struct pipe_resource *resource,
struct pb_buffer **handle,
struct radeon_surf **surface)
{
struct r600_texture *res = (struct r600_texture *)resource;
if (handle)
- *handle = res->resource.buf;
+ *handle = res->buffer.buf;
if (surface)
*surface = &res->surface;
}
/**
* creates an UVD compatible decoder
*/
struct pipe_video_codec *si_uvd_create_decoder(struct pipe_context *context,
const struct pipe_video_codec *templ)
--
2.17.0