[Mesa-dev] [PATCH 2/9] radeonsi: rename r600_atom -> si_atom
Marek Olšák
maraeo at gmail.com
Tue Apr 17 00:42:04 UTC 2018
From: Marek Olšák <marek.olsak at amd.com>
---
src/gallium/drivers/radeonsi/si_descriptors.c | 2 +-
src/gallium/drivers/radeonsi/si_pipe.h | 40 ++++++-------
src/gallium/drivers/radeonsi/si_query.c | 4 +-
src/gallium/drivers/radeonsi/si_state.c | 30 +++++-----
src/gallium/drivers/radeonsi/si_state.h | 60 +++++++++----------
.../drivers/radeonsi/si_state_binning.c | 2 +-
src/gallium/drivers/radeonsi/si_state_draw.c | 2 +-
.../drivers/radeonsi/si_state_shaders.c | 4 +-
.../drivers/radeonsi/si_state_streamout.c | 4 +-
.../drivers/radeonsi/si_state_viewport.c | 4 +-
10 files changed, 76 insertions(+), 76 deletions(-)
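A brief note on the machinery being renamed, for readers who don't know the driver: an atom couples an emit callback with a bit index (id) into the context's dirty-atom mask; state setters mark the atom dirty, and the draw path scans the mask and calls each dirty atom's emit hook (see the si_atom definition in si_state.h and the emit loop in si_state_draw.c below). The standalone sketch that follows illustrates only that pattern, with invented names and printf in place of real command-stream writes; it is not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver context and atom, heavily simplified. */
struct ctx;

struct atom {
	void (*emit)(struct ctx *ctx, struct atom *state);
	unsigned short id;	/* bit position in the context's dirty mask */
};

struct ctx {
	unsigned dirty_atoms;	/* one bit per registered atom */
	struct atom *atoms[32];	/* index == atom->id */
};

static void mark_atom_dirty(struct ctx *ctx, struct atom *atom)
{
	ctx->dirty_atoms |= 1u << atom->id;
}

/* Emit every dirty atom, peeling off one set bit at a time
 * (the same idea as u_bit_scan in the loop below). */
static void emit_all_dirty(struct ctx *ctx)
{
	unsigned mask = ctx->dirty_atoms;

	while (mask) {
		unsigned i = (unsigned)__builtin_ctz(mask);	/* lowest set bit */
		mask &= mask - 1;				/* clear it */
		ctx->atoms[i]->emit(ctx, ctx->atoms[i]);
	}
	ctx->dirty_atoms = 0;
}

/* Example atom: pretend this writes blend-color registers to the CS. */
static void emit_blend_color(struct ctx *ctx, struct atom *state)
{
	(void)ctx;
	printf("emitting atom %u (blend color)\n", state->id);
}

int main(void)
{
	struct ctx ctx = {0};
	struct atom blend_color = { .emit = emit_blend_color, .id = 0 };

	ctx.atoms[blend_color.id] = &blend_color;
	mark_atom_dirty(&ctx, &blend_color);
	emit_all_dirty(&ctx);	/* emits once; nothing is dirty afterwards */
	return 0;
}

The rename itself is mechanical: the struct, the pointer fields, and every emit callback signature switch from r600_atom to si_atom, with no functional change.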
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index 4beeb2db6c2..8bd7c77c8c6 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -2147,21 +2147,21 @@ static void si_emit_global_shader_pointers(struct si_context *sctx,
R_00B330_SPI_SHADER_USER_DATA_ES_0);
si_emit_shader_pointer(sctx, descs,
R_00B230_SPI_SHADER_USER_DATA_GS_0);
si_emit_shader_pointer(sctx, descs,
R_00B430_SPI_SHADER_USER_DATA_HS_0);
si_emit_shader_pointer(sctx, descs,
R_00B530_SPI_SHADER_USER_DATA_LS_0);
}
void si_emit_graphics_shader_pointers(struct si_context *sctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
uint32_t *sh_base = sctx->shader_pointers.sh_base;
if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS)) {
si_emit_global_shader_pointers(sctx,
&sctx->descriptors[SI_DESCS_RW_BUFFERS]);
}
si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(VERTEX),
sh_base[PIPE_SHADER_VERTEX]);
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 2ed764bd097..a76d52f7ea0 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -532,21 +532,21 @@ struct si_screen {
* the number of cores. */
LLVMTargetMachineRef tm[3]; /* used by the queue only */
struct util_queue shader_compiler_queue_low_priority;
/* Use at most 2 low priority threads on quadcore and better.
* We want to minimize the impact on multithreaded Mesa. */
LLVMTargetMachineRef tm_low_priority[2]; /* at most 2 threads */
};
struct si_blend_color {
- struct r600_atom atom;
+ struct si_atom atom;
struct pipe_blend_color state;
bool any_nonzeros;
};
struct si_sampler_view {
struct pipe_sampler_view base;
/* [0..7] = image descriptor
* [4..7] = buffer descriptor */
uint32_t state[8];
uint32_t fmask_state[8];
@@ -587,21 +587,21 @@ struct si_samplers {
uint32_t needs_color_decompress_mask;
};
struct si_images {
struct pipe_image_view views[SI_NUM_IMAGES];
uint32_t needs_color_decompress_mask;
unsigned enabled_mask;
};
struct si_framebuffer {
- struct r600_atom atom;
+ struct si_atom atom;
struct pipe_framebuffer_state state;
unsigned colorbuf_enabled_4bit;
unsigned spi_shader_col_format;
unsigned spi_shader_col_format_alpha;
unsigned spi_shader_col_format_blend;
unsigned spi_shader_col_format_blend_alpha;
ubyte nr_samples:5; /* at most 16xAA */
ubyte log_samples:3; /* at most 4 = 16xAA */
ubyte compressed_cb_mask;
ubyte uncompressed_cb_mask;
@@ -615,81 +615,81 @@ struct si_framebuffer {
};
struct si_signed_scissor {
int minx;
int miny;
int maxx;
int maxy;
};
struct si_scissors {
- struct r600_atom atom;
+ struct si_atom atom;
unsigned dirty_mask;
struct pipe_scissor_state states[SI_MAX_VIEWPORTS];
};
struct si_viewports {
- struct r600_atom atom;
+ struct si_atom atom;
unsigned dirty_mask;
unsigned depth_range_dirty_mask;
struct pipe_viewport_state states[SI_MAX_VIEWPORTS];
struct si_signed_scissor as_scissor[SI_MAX_VIEWPORTS];
};
struct si_clip_state {
- struct r600_atom atom;
+ struct si_atom atom;
struct pipe_clip_state state;
bool any_nonzeros;
};
struct si_sample_locs {
- struct r600_atom atom;
+ struct si_atom atom;
unsigned nr_samples;
};
struct si_sample_mask {
- struct r600_atom atom;
+ struct si_atom atom;
uint16_t sample_mask;
};
struct si_streamout_target {
struct pipe_stream_output_target b;
/* The buffer where BUFFER_FILLED_SIZE is stored. */
struct r600_resource *buf_filled_size;
unsigned buf_filled_size_offset;
bool buf_filled_size_valid;
unsigned stride_in_dw;
};
struct si_streamout {
- struct r600_atom begin_atom;
+ struct si_atom begin_atom;
bool begin_emitted;
unsigned enabled_mask;
unsigned num_targets;
struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
unsigned append_bitmask;
bool suspended;
/* External state which comes from the vertex shader,
* it must be set explicitly when binding a shader. */
uint16_t *stride_in_dw;
unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
/* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
unsigned hw_enabled_mask;
/* The state of VGT_STRMOUT_(CONFIG|EN). */
- struct r600_atom enable_atom;
+ struct si_atom enable_atom;
bool streamout_enabled;
bool prims_gen_query_enabled;
int num_prims_gen_queries;
};
/* A shader state consists of the shader selector, which is a constant state
* object shared by multiple contexts and shouldn't be modified, and
* the current shader variant selected for this context.
*/
struct si_shader_ctx_state {
@@ -818,32 +818,32 @@ struct si_context {
union si_state_atoms atoms;
unsigned dirty_atoms; /* mask */
/* PM4 states (precomputed immutable states) */
unsigned dirty_states;
union si_state queued;
union si_state emitted;
/* Atom declarations. */
struct si_framebuffer framebuffer;
struct si_sample_locs msaa_sample_locs;
- struct r600_atom db_render_state;
- struct r600_atom dpbb_state;
- struct r600_atom msaa_config;
+ struct si_atom db_render_state;
+ struct si_atom dpbb_state;
+ struct si_atom msaa_config;
struct si_sample_mask sample_mask;
- struct r600_atom cb_render_state;
+ struct si_atom cb_render_state;
unsigned last_cb_target_mask;
struct si_blend_color blend_color;
- struct r600_atom clip_regs;
+ struct si_atom clip_regs;
struct si_clip_state clip_state;
struct si_shader_data shader_pointers;
struct si_stencil_ref stencil_ref;
- struct r600_atom spi_map;
+ struct si_atom spi_map;
struct si_scissors scissors;
struct si_streamout streamout;
struct si_viewports viewports;
/* Precomputed states. */
struct si_pm4_state *init_config;
struct si_pm4_state *init_config_gs_rings;
bool init_config_has_vgt_flush;
struct si_pm4_state *vgt_shader_config[4];
@@ -925,21 +925,21 @@ struct si_context {
int last_gs_out_prim;
int last_prim;
int last_multi_vgt_param;
int last_rast_prim;
unsigned last_sc_line_stipple;
unsigned current_vs_state;
unsigned last_vs_state;
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
/* Scratch buffer */
- struct r600_atom scratch_state;
+ struct si_atom scratch_state;
struct r600_resource *scratch_buffer;
unsigned scratch_waves;
unsigned spi_tmpring_size;
struct r600_resource *compute_scratch_buffer;
/* Emitted derived tessellation state. */
/* Local shader (VS), or HS if LS-HS are merged. */
struct si_shader *last_ls;
struct si_shader_selector *last_tcs;
@@ -1021,21 +1021,21 @@ struct si_context {
unsigned last_tex_ps_draw_ratio; /* for query */
/* Queries. */
/* Maintain the list of active queries for pausing between IBs. */
int num_occlusion_queries;
int num_perfect_occlusion_queries;
struct list_head active_queries;
unsigned num_cs_dw_queries_suspend;
/* Render condition. */
- struct r600_atom render_cond_atom;
+ struct si_atom render_cond_atom;
struct pipe_query *render_cond;
unsigned render_cond_mode;
bool render_cond_invert;
bool render_cond_force_off; /* for u_blitter */
/* Statistics gathering for the DCC enablement heuristic. It can't be
* in r600_texture because r600_texture can be shared by multiple
* contexts. This is for back buffers only. We shouldn't get too many
* of those.
*
@@ -1351,42 +1351,42 @@ si_context_add_resource_size(struct si_context *sctx, struct pipe_resource *r)
}
static inline void
si_invalidate_draw_sh_constants(struct si_context *sctx)
{
sctx->last_base_vertex = SI_BASE_VERTEX_UNKNOWN;
}
static inline void
si_set_atom_dirty(struct si_context *sctx,
- struct r600_atom *atom, bool dirty)
+ struct si_atom *atom, bool dirty)
{
unsigned bit = 1 << atom->id;
if (dirty)
sctx->dirty_atoms |= bit;
else
sctx->dirty_atoms &= ~bit;
}
static inline bool
si_is_atom_dirty(struct si_context *sctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
unsigned bit = 1 << atom->id;
return sctx->dirty_atoms & bit;
}
static inline void
si_mark_atom_dirty(struct si_context *sctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
si_set_atom_dirty(sctx, atom, true);
}
static inline struct si_shader_ctx_state *si_get_vs(struct si_context *sctx)
{
if (sctx->gs_shader.cso)
return &sctx->gs_shader;
if (sctx->tes_shader.cso)
return &sctx->tes_shader;
diff --git a/src/gallium/drivers/radeonsi/si_query.c b/src/gallium/drivers/radeonsi/si_query.c
index c859cd081ca..7d94194f300 100644
--- a/src/gallium/drivers/radeonsi/si_query.c
+++ b/src/gallium/drivers/radeonsi/si_query.c
@@ -930,21 +930,21 @@ static void emit_set_predicate(struct si_context *ctx,
} else {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
radeon_emit(cs, va);
radeon_emit(cs, op | ((va >> 32) & 0xFF));
}
radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}
static void si_emit_query_predication(struct si_context *ctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
struct si_query_buffer *qbuf;
uint32_t op;
bool flag_wait, invert;
if (!query)
return;
invert = ctx->render_cond_invert;
@@ -1769,21 +1769,21 @@ static void si_query_hw_get_result_resource(struct si_context *sctx,
pipe_resource_reference(&tmp_buffer, NULL);
}
static void si_render_condition(struct pipe_context *ctx,
struct pipe_query *query,
boolean condition,
enum pipe_render_cond_flag mode)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_query_hw *rquery = (struct si_query_hw *)query;
- struct r600_atom *atom = &sctx->render_cond_atom;
+ struct si_atom *atom = &sctx->render_cond_atom;
if (query) {
bool needs_workaround = false;
/* There was a firmware regression in VI which causes successive
* SET_PREDICATION packets to give the wrong answer for
* non-inverted stream overflow predication.
*/
if (((sctx->chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
(sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 49110b292b9..5cb11311a97 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -28,31 +28,31 @@
#include "util/u_dual_blend.h"
#include "util/u_format.h"
#include "util/u_format_s3tc.h"
#include "util/u_memory.h"
#include "util/u_resource.h"
#include "util/u_upload_mgr.h"
/* Initialize an external atom (owned by ../radeon). */
static void
-si_init_external_atom(struct si_context *sctx, struct r600_atom *atom,
- struct r600_atom **list_elem)
+si_init_external_atom(struct si_context *sctx, struct si_atom *atom,
+ struct si_atom **list_elem)
{
atom->id = list_elem - sctx->atoms.array;
*list_elem = atom;
}
/* Initialize an atom owned by radeonsi. */
-void si_init_atom(struct si_context *sctx, struct r600_atom *atom,
- struct r600_atom **list_elem,
- void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
+void si_init_atom(struct si_context *sctx, struct si_atom *atom,
+ struct si_atom **list_elem,
+ void (*emit_func)(struct si_context *ctx, struct si_atom *state))
{
atom->emit = emit_func;
atom->id = list_elem - sctx->atoms.array;
*list_elem = atom;
}
static unsigned si_map_swizzle(unsigned swizzle)
{
switch (swizzle) {
case PIPE_SWIZZLE_Y:
@@ -76,21 +76,21 @@ static unsigned si_pack_float_12p4(float x)
return x <= 0 ? 0 :
x >= 4096 ? 0xffff : x * 16;
}
/*
* Inferred framebuffer and blender state.
*
* CB_TARGET_MASK is emitted here to avoid a hang with dual source blending
* if there is not enough PS outputs.
*/
-static void si_emit_cb_render_state(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_cb_render_state(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_state_blend *blend = sctx->queued.named.blend;
/* CB_COLORn_INFO.FORMAT=INVALID should disable unbound colorbuffers,
* but you never know. */
uint32_t cb_target_mask = sctx->framebuffer.colorbuf_enabled_4bit;
unsigned i;
if (blend)
cb_target_mask &= blend->cb_target_mask;
@@ -709,21 +709,21 @@ static void si_set_blend_color(struct pipe_context *ctx,
const struct pipe_blend_color *state)
{
struct si_context *sctx = (struct si_context *)ctx;
static const struct pipe_blend_color zeros;
sctx->blend_color.state = *state;
sctx->blend_color.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
si_mark_atom_dirty(sctx, &sctx->blend_color.atom);
}
-static void si_emit_blend_color(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_blend_color(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
radeon_emit_array(cs, (uint32_t*)sctx->blend_color.state.color, 4);
}
/*
* Clipping
*/
@@ -743,29 +743,29 @@ static void si_set_clip_state(struct pipe_context *ctx,
si_mark_atom_dirty(sctx, &sctx->clip_state.atom);
cb.buffer = NULL;
cb.user_buffer = state->ucp;
cb.buffer_offset = 0;
cb.buffer_size = 4*4*8;
si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES, &cb);
pipe_resource_reference(&cb.buffer, NULL);
}
-static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_clip_state(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP_0_X, 6*4);
radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
}
-static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_clip_regs(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_shader *vs = si_get_vs_state(sctx);
struct si_shader_selector *vs_sel = vs->selector;
struct tgsi_shader_info *info = &vs_sel->info;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned window_space =
info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
unsigned clipdist_mask = vs_sel->clipdist_mask;
unsigned ucp_mask = clipdist_mask ? 0 : rs->clip_plane_enable & SIX_BITS;
@@ -1065,21 +1065,21 @@ static void si_delete_rs_state(struct pipe_context *ctx, void *state)
if (sctx->queued.named.rasterizer == state)
si_pm4_bind_state(sctx, poly_offset, NULL);
FREE(rs->pm4_poly_offset);
si_pm4_delete_state(sctx, rasterizer, rs);
}
/*
* infeered state between dsa and stencil ref
*/
-static void si_emit_stencil_ref(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_stencil_ref(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct pipe_stencil_ref *ref = &sctx->stencil_ref.state;
struct si_dsa_stencil_ref_part *dsa = &sctx->stencil_ref.dsa_part;
radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
radeon_emit(cs, S_028430_STENCILTESTVAL(ref->ref_value[0]) |
S_028430_STENCILMASK(dsa->valuemask[0]) |
S_028430_STENCILWRITEMASK(dsa->writemask[0]) |
S_028430_STENCILOPVAL(1));
@@ -1353,21 +1353,21 @@ void si_set_occlusion_query_state(struct si_context *sctx,
}
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st)
{
st->saved_compute = sctx->cs_shader_state.program;
si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
}
-static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *state)
+static void si_emit_db_render_state(struct si_context *sctx, struct si_atom *state)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned db_shader_control;
radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
/* DB_RENDER_CONTROL */
if (sctx->dbcb_depth_copy_enabled ||
sctx->dbcb_stencil_copy_enabled) {
@@ -2945,21 +2945,21 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
sctx->do_update_shaders = true;
if (!sctx->decompression_enabled) {
/* Prevent textures decompression when the framebuffer state
* changes come from the decompression passes themselves.
*/
sctx->need_check_render_feedback = true;
}
}
-static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_framebuffer_state(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
unsigned i, nr_cbufs = state->nr_cbufs;
struct r600_texture *tex = NULL;
struct r600_surface *cb = NULL;
unsigned cb_color_info = 0;
/* Colorbuffers. */
for (i = 0; i < nr_cbufs; i++) {
@@ -3203,21 +3203,21 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
if (sctx->screen->dfsm_allowed) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
sctx->framebuffer.dirty_cbufs = 0;
sctx->framebuffer.dirty_zsbuf = false;
}
static void si_emit_msaa_sample_locs(struct si_context *sctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
unsigned nr_samples = sctx->framebuffer.nr_samples;
bool has_msaa_sample_loc_bug = sctx->screen->has_msaa_sample_loc_bug;
/* Smoothing (only possible with nr_samples == 1) uses the same
* sample locations as the MSAA it simulates.
*/
if (nr_samples <= 1 && sctx->smoothing_enabled)
nr_samples = SI_NUM_SMOOTH_AA_SAMPLES;
@@ -3314,21 +3314,21 @@ static bool si_out_of_order_rasterization(struct si_context *sctx)
}
if (colormask & ~blendmask) {
if (!dsa_order_invariant.pass_last)
return false;
}
return true;
}
-static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_msaa_config(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
unsigned num_tile_pipes = sctx->screen->info.num_tile_pipes;
/* 33% faster rendering to linear color buffers */
bool dst_is_linear = sctx->framebuffer.any_dst_linear;
bool out_of_order_rast = si_out_of_order_rasterization(sctx);
unsigned sc_mode_cntl_1 =
S_028A4C_WALK_SIZE(dst_is_linear) |
S_028A4C_WALK_FENCE_ENABLE(!dst_is_linear) |
S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
@@ -4170,21 +4170,21 @@ static void si_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
{
struct si_context *sctx = (struct si_context *)ctx;
if (sctx->sample_mask.sample_mask == (uint16_t)sample_mask)
return;
sctx->sample_mask.sample_mask = sample_mask;
si_mark_atom_dirty(sctx, &sctx->sample_mask.atom);
}
-static void si_emit_sample_mask(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_sample_mask(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
unsigned mask = sctx->sample_mask.sample_mask;
/* Needed for line and polygon smoothing as well as for the Polaris
* small primitive filter. We expect the state tracker to take care of
* this for us.
*/
assert(mask == 0xffff || sctx->framebuffer.nr_samples > 1 ||
(mask & 1 && sctx->blitter->running));
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index 628cce87ef9..ebdb44694e4 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -40,22 +40,22 @@
#define SI_NUM_SHADER_BUFFERS 16
struct si_screen;
struct si_shader;
struct si_shader_selector;
struct r600_texture;
struct si_qbo_state;
/* This encapsulates a state or an operation which can emitted into the GPU
* command stream. */
-struct r600_atom {
- void (*emit)(struct si_context *ctx, struct r600_atom *state);
+struct si_atom {
+ void (*emit)(struct si_context *ctx, struct si_atom *state);
unsigned short id;
};
struct si_state_blend {
struct si_pm4_state pm4;
uint32_t cb_target_mask;
/* Set 0xf or 0x0 (4 bits) per render target if the following is
* true. ANDed with spi_shader_col_format.
*/
unsigned cb_target_enabled_4bit;
@@ -125,21 +125,21 @@ struct si_state_dsa {
ubyte alpha_func:3;
bool depth_enabled:1;
bool depth_write_enabled:1;
bool stencil_enabled:1;
bool stencil_write_enabled:1;
bool db_can_write:1;
};
struct si_stencil_ref {
- struct r600_atom atom;
+ struct si_atom atom;
struct pipe_stencil_ref state;
struct si_dsa_stencil_ref_part dsa_part;
};
struct si_vertex_elements
{
uint32_t instance_divisors[SI_MAX_ATTRIBS];
uint32_t rsrc_word3[SI_MAX_ATTRIBS];
uint16_t src_offset[SI_MAX_ATTRIBS];
uint8_t fix_fetch[SI_MAX_ATTRIBS];
@@ -171,47 +171,47 @@ union si_state {
struct si_pm4_state *ps;
} named;
struct si_pm4_state *array[0];
};
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
union si_state_atoms {
struct {
/* The order matters. */
- struct r600_atom *render_cond;
- struct r600_atom *streamout_begin;
- struct r600_atom *streamout_enable; /* must be after streamout_begin */
- struct r600_atom *framebuffer;
- struct r600_atom *msaa_sample_locs;
- struct r600_atom *db_render_state;
- struct r600_atom *dpbb_state;
- struct r600_atom *msaa_config;
- struct r600_atom *sample_mask;
- struct r600_atom *cb_render_state;
- struct r600_atom *blend_color;
- struct r600_atom *clip_regs;
- struct r600_atom *clip_state;
- struct r600_atom *shader_pointers;
- struct r600_atom *scissors;
- struct r600_atom *viewports;
- struct r600_atom *stencil_ref;
- struct r600_atom *spi_map;
- struct r600_atom *scratch_state;
+ struct si_atom *render_cond;
+ struct si_atom *streamout_begin;
+ struct si_atom *streamout_enable; /* must be after streamout_begin */
+ struct si_atom *framebuffer;
+ struct si_atom *msaa_sample_locs;
+ struct si_atom *db_render_state;
+ struct si_atom *dpbb_state;
+ struct si_atom *msaa_config;
+ struct si_atom *sample_mask;
+ struct si_atom *cb_render_state;
+ struct si_atom *blend_color;
+ struct si_atom *clip_regs;
+ struct si_atom *clip_state;
+ struct si_atom *shader_pointers;
+ struct si_atom *scissors;
+ struct si_atom *viewports;
+ struct si_atom *stencil_ref;
+ struct si_atom *spi_map;
+ struct si_atom *scratch_state;
} s;
- struct r600_atom *array[0];
+ struct si_atom *array[0];
};
-#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct r600_atom*))
+#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))
struct si_shader_data {
- struct r600_atom atom;
+ struct si_atom atom;
uint32_t sh_base[SI_NUM_SHADERS];
};
/* Private read-write buffer slots. */
enum {
SI_ES_RING_ESGS,
SI_GS_RING_ESGS,
SI_RING_GSVS,
@@ -357,42 +357,42 @@ bool si_upload_graphics_shader_descriptors(struct si_context *sctx);
bool si_upload_compute_shader_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_all_descriptors_begin_new_cs(struct si_context *sctx);
void si_all_resident_buffers_begin_new_cs(struct si_context *sctx);
void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
const uint8_t *ptr, unsigned size, uint32_t *const_offset);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx,
- struct r600_atom *atom);
+ struct si_atom *atom);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_rw_buffer(struct si_context *sctx,
uint slot, const struct pipe_constant_buffer *input);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx,
struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv,
struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap,
unsigned entry_size,
unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf,
uint64_t old_va);
/* si_state.c */
struct si_shader_selector;
-void si_init_atom(struct si_context *sctx, struct r600_atom *atom,
- struct r600_atom **list_elem,
- void (*emit_func)(struct si_context *ctx, struct r600_atom *state));
+void si_init_atom(struct si_context *sctx, struct si_atom *atom,
+ struct si_atom **list_elem,
+ void (*emit_func)(struct si_context *ctx, struct si_atom *state));
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void
si_make_buffer_descriptor(struct si_screen *screen, struct r600_resource *buf,
enum pipe_format format,
unsigned offset, unsigned size,
uint32_t *state);
void
si_make_texture_descriptor(struct si_screen *screen,
struct r600_texture *tex,
@@ -411,21 +411,21 @@ si_create_sampler_view_custom(struct pipe_context *ctx,
const struct pipe_sampler_view *state,
unsigned width0, unsigned height0,
unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_set_occlusion_query_state(struct si_context *sctx,
bool old_perfect_enable);
/* si_state_binning.c */
-void si_emit_dpbb_state(struct si_context *sctx, struct r600_atom *state);
+void si_emit_dpbb_state(struct si_context *sctx, struct si_atom *state);
/* si_state_shaders.c */
bool si_update_shaders(struct si_context *sctx);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_get_active_slot_masks(const struct tgsi_shader_info *info,
uint32_t *const_and_shader_buffers,
uint64_t *samplers_and_images);
void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
diff --git a/src/gallium/drivers/radeonsi/si_state_binning.c b/src/gallium/drivers/radeonsi/si_state_binning.c
index 87b89e8b492..6c702c0e687 100644
--- a/src/gallium/drivers/radeonsi/si_state_binning.c
+++ b/src/gallium/drivers/radeonsi/si_state_binning.c
@@ -327,21 +327,21 @@ static void si_emit_dpbb_disable(struct si_context *sctx)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
S_028C44_DISABLE_START_OF_PRIM(1));
radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF));
}
-void si_emit_dpbb_state(struct si_context *sctx, struct r600_atom *state)
+void si_emit_dpbb_state(struct si_context *sctx, struct si_atom *state)
{
struct si_screen *sscreen = sctx->screen;
struct si_state_blend *blend = sctx->queued.named.blend;
struct si_state_dsa *dsa = sctx->queued.named.dsa;
unsigned db_shader_control = sctx->ps_db_shader_control;
assert(sctx->chip_class >= GFX9);
if (!sscreen->dpbb_allowed || !blend || !dsa) {
si_emit_dpbb_disable(sctx);
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 96dfd93645d..852b6b0e977 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -1174,21 +1174,21 @@ static void si_get_draw_start_count(struct si_context *sctx,
*count = info->count;
}
}
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
unsigned skip_atom_mask)
{
/* Emit state atoms. */
unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
while (mask) {
- struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
+ struct si_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
atom->emit(sctx, atom);
}
sctx->dirty_atoms &= skip_atom_mask;
/* Emit states. */
mask = sctx->dirty_states;
while (mask) {
unsigned i = u_bit_scan(&mask);
struct si_pm4_state *state = sctx->queued.array[i];
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index 67ab75bbd2d..db44a4967ba 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -2591,21 +2591,21 @@ static unsigned si_get_ps_input_cntl(struct si_context *sctx,
* Don't set any other bits.
* (FLAT_SHADE=1 completely changes behavior) */
ps_input_cntl = S_028644_OFFSET(0x20);
/* D3D 9 behaviour. GL is undefined */
if (name == TGSI_SEMANTIC_COLOR && index == 0)
ps_input_cntl |= S_028644_DEFAULT_VAL(3);
}
return ps_input_cntl;
}
-static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_spi_map(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_shader *ps = sctx->ps_shader.current;
struct si_shader *vs = si_get_vs_state(sctx);
struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
unsigned i, num_interp, num_written = 0, bcol_interp[2];
if (!ps || !ps->selector->info.num_inputs)
return;
@@ -3322,21 +3322,21 @@ bool si_update_shaders(struct si_context *sctx)
sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
else if (!sctx->queued.named.ps)
sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
}
sctx->do_update_shaders = false;
return true;
}
static void si_emit_scratch_state(struct si_context *sctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
sctx->scratch_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
diff --git a/src/gallium/drivers/radeonsi/si_state_streamout.c b/src/gallium/drivers/radeonsi/si_state_streamout.c
index 4cd5d2fc6b3..482946eba2b 100644
--- a/src/gallium/drivers/radeonsi/si_state_streamout.c
+++ b/src/gallium/drivers/radeonsi/si_state_streamout.c
@@ -247,21 +247,21 @@ static void si_flush_vgt_streamout(struct si_context *sctx)
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
radeon_emit(cs, 0);
radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
radeon_emit(cs, 4); /* poll interval */
}
-static void si_emit_streamout_begin(struct si_context *sctx, struct r600_atom *atom)
+static void si_emit_streamout_begin(struct si_context *sctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
unsigned i;
si_flush_vgt_streamout(sctx);
for (i = 0; i < sctx->streamout.num_targets; i++) {
if (!t[i])
@@ -350,21 +350,21 @@ void si_emit_streamout_end(struct si_context *sctx)
}
/* STREAMOUT CONFIG DERIVED STATE
*
* Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
* The buffer mask is an independent state, so no writes occur if there
* are no buffers bound.
*/
static void si_emit_streamout_enable(struct si_context *sctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
radeon_set_context_reg_seq(sctx->gfx_cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
radeon_emit(sctx->gfx_cs,
S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
S_028B94_RAST_STREAM(0) |
S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
radeon_emit(sctx->gfx_cs,
sctx->streamout.hw_enabled_mask &
diff --git a/src/gallium/drivers/radeonsi/si_state_viewport.c b/src/gallium/drivers/radeonsi/si_state_viewport.c
index 09ea584e31e..5fc57647245 100644
--- a/src/gallium/drivers/radeonsi/si_state_viewport.c
+++ b/src/gallium/drivers/radeonsi/si_state_viewport.c
@@ -203,21 +203,21 @@ static void si_emit_guardband(struct si_context *ctx,
/* If any of the GB registers is updated, all of them must be updated. */
radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
radeon_emit(cs, fui(guardband_y)); /* R_028BE8_PA_CL_GB_VERT_CLIP_ADJ */
radeon_emit(cs, fui(discard_y)); /* R_028BEC_PA_CL_GB_VERT_DISC_ADJ */
radeon_emit(cs, fui(guardband_x)); /* R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ */
radeon_emit(cs, fui(discard_x)); /* R_028BF4_PA_CL_GB_HORZ_DISC_ADJ */
}
-static void si_emit_scissors(struct si_context *ctx, struct r600_atom *atom)
+static void si_emit_scissors(struct si_context *ctx, struct si_atom *atom)
{
struct radeon_winsys_cs *cs = ctx->gfx_cs;
struct pipe_scissor_state *states = ctx->scissors.states;
unsigned mask = ctx->scissors.dirty_mask;
bool scissor_enabled = false;
struct si_signed_scissor max_vp_scissor;
int i;
if (ctx->queued.named.rasterizer)
scissor_enabled = ctx->queued.named.rasterizer->scissor_enable;
@@ -376,21 +376,21 @@ static void si_emit_depth_ranges(struct si_context *ctx)
si_viewport_zmin_zmax(&states[i], clip_halfz, window_space,
&zmin, &zmax);
radeon_emit(cs, fui(zmin));
radeon_emit(cs, fui(zmax));
}
}
ctx->viewports.depth_range_dirty_mask = 0;
}
static void si_emit_viewport_states(struct si_context *ctx,
- struct r600_atom *atom)
+ struct si_atom *atom)
{
si_emit_viewports(ctx);
si_emit_depth_ranges(ctx);
}
/**
* This reacts to 2 state changes:
* - VS.writes_viewport_index
* - VS output position in window space (enable/disable)
*
--
2.17.0