zink: remove all pipe_shader_type usage

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18051>
This commit is contained in:
Mike Blumenkrantz 2022-08-04 12:31:44 -04:00
parent 2792d2bd4a
commit b988b8c84b
14 changed files with 273 additions and 279 deletions

View file

@@ -382,15 +382,15 @@ zink_blit_begin(struct zink_context *ctx, enum zink_blit_flags flags)
util_blitter_save_viewport(ctx->blitter, ctx->vp_state.viewport_states);
util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vertex_buffers);
util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
util_blitter_save_tessctrl_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_TESS_CTRL]);
util_blitter_save_tesseval_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_TESS_EVAL]);
util_blitter_save_geometry_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_GEOMETRY]);
util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[MESA_SHADER_VERTEX]);
util_blitter_save_tessctrl_shader(ctx->blitter, ctx->gfx_stages[MESA_SHADER_TESS_CTRL]);
util_blitter_save_tesseval_shader(ctx->blitter, ctx->gfx_stages[MESA_SHADER_TESS_EVAL]);
util_blitter_save_geometry_shader(ctx->blitter, ctx->gfx_stages[MESA_SHADER_GEOMETRY]);
util_blitter_save_rasterizer(ctx->blitter, ctx->rast_state);
util_blitter_save_so_targets(ctx->blitter, ctx->num_so_targets, ctx->so_targets);
if (flags & ZINK_BLIT_SAVE_FS) {
util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[MESA_SHADER_FRAGMENT]);
util_blitter_save_blend(ctx->blitter, ctx->gfx_pipeline_state.blend_state);
util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->dsa_state);
util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
@@ -398,7 +398,7 @@ zink_blit_begin(struct zink_context *ctx, enum zink_blit_flags flags)
util_blitter_save_scissor(ctx->blitter, ctx->vp_state.scissor_states);
/* also util_blitter_save_window_rectangles when we have that? */
util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[MESA_SHADER_FRAGMENT]);
}
if (flags & ZINK_BLIT_SAVE_FB)
@@ -407,11 +407,11 @@ zink_blit_begin(struct zink_context *ctx, enum zink_blit_flags flags)
if (flags & ZINK_BLIT_SAVE_TEXTURES) {
util_blitter_save_fragment_sampler_states(ctx->blitter,
ctx->di.num_samplers[PIPE_SHADER_FRAGMENT],
(void**)ctx->sampler_states[PIPE_SHADER_FRAGMENT]);
ctx->di.num_samplers[MESA_SHADER_FRAGMENT],
(void**)ctx->sampler_states[MESA_SHADER_FRAGMENT]);
util_blitter_save_fragment_sampler_views(ctx->blitter,
ctx->di.num_sampler_views[PIPE_SHADER_FRAGMENT],
ctx->sampler_views[PIPE_SHADER_FRAGMENT]);
ctx->di.num_sampler_views[MESA_SHADER_FRAGMENT],
ctx->sampler_views[MESA_SHADER_FRAGMENT]);
}
if (flags & ZINK_BLIT_NO_COND_RENDER && ctx->render_condition_active)

View file

@@ -391,7 +391,7 @@ zink_screen_init_compiler(struct zink_screen *screen)
const void *
zink_get_compiler_options(struct pipe_screen *pscreen,
enum pipe_shader_ir ir,
enum pipe_shader_type shader)
gl_shader_stage shader)
{
assert(ir == PIPE_SHADER_IR_NIR);
return &zink_screen(pscreen)->nir_options;
@@ -3166,23 +3166,23 @@ zink_shader_free(struct zink_context *ctx, struct zink_shader *shader)
zink_compute_program_reference(ctx, &comp, NULL);
} else {
struct zink_gfx_program *prog = (void*)entry->key;
enum pipe_shader_type pstage = pipe_shader_type_from_mesa(shader->nir->info.stage);
assert(pstage < ZINK_GFX_SHADER_COUNT);
if (!prog->base.removed && (shader->nir->info.stage != MESA_SHADER_TESS_CTRL || !shader->is_generated)) {
gl_shader_stage stage = shader->nir->info.stage;
assert(stage < ZINK_GFX_SHADER_COUNT);
if (!prog->base.removed && (stage != MESA_SHADER_TESS_CTRL || !shader->is_generated)) {
unsigned stages_present = prog->stages_present;
if (prog->shaders[PIPE_SHADER_TESS_CTRL] && prog->shaders[PIPE_SHADER_TESS_CTRL]->is_generated)
stages_present &= ~BITFIELD_BIT(PIPE_SHADER_TESS_CTRL);
if (prog->shaders[MESA_SHADER_TESS_CTRL] && prog->shaders[MESA_SHADER_TESS_CTRL]->is_generated)
stages_present &= ~BITFIELD_BIT(MESA_SHADER_TESS_CTRL);
struct hash_table *ht = &ctx->program_cache[zink_program_cache_stages(stages_present)];
struct hash_entry *he = _mesa_hash_table_search(ht, prog->shaders);
assert(he);
_mesa_hash_table_remove(ht, he);
prog->base.removed = true;
}
if (shader->nir->info.stage != MESA_SHADER_TESS_CTRL || !shader->is_generated)
prog->shaders[pstage] = NULL;
if (stage != MESA_SHADER_TESS_CTRL || !shader->is_generated)
prog->shaders[stage] = NULL;
/* only remove generated tcs during parent tes destruction */
if (shader->nir->info.stage == MESA_SHADER_TESS_EVAL && shader->generated)
prog->shaders[PIPE_SHADER_TESS_CTRL] = NULL;
if (stage == MESA_SHADER_TESS_EVAL && shader->generated)
prog->shaders[MESA_SHADER_TESS_CTRL] = NULL;
zink_gfx_program_reference(ctx, &prog, NULL);
}
}

View file

@@ -44,7 +44,7 @@ struct tgsi_token;
const void *
zink_get_compiler_options(struct pipe_screen *screen,
enum pipe_shader_ir ir,
enum pipe_shader_type shader);
gl_shader_stage shader);
struct nir_shader *
zink_tgsi_to_nir(struct pipe_screen *screen, const struct tgsi_token *tokens);

View file

@@ -480,7 +480,7 @@ get_layout_for_binding(const struct zink_context *ctx, struct zink_resource *res
}
ALWAYS_INLINE static struct zink_surface *
get_imageview_for_binding(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned idx)
get_imageview_for_binding(struct zink_context *ctx, gl_shader_stage stage, enum zink_descriptor_type type, unsigned idx)
{
switch (type) {
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW: {
@@ -504,7 +504,7 @@ get_imageview_for_binding(struct zink_context *ctx, enum pipe_shader_type stage,
}
ALWAYS_INLINE static struct zink_buffer_view *
get_bufferview_for_binding(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned idx)
get_bufferview_for_binding(struct zink_context *ctx, gl_shader_stage stage, enum zink_descriptor_type type, unsigned idx)
{
switch (type) {
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW: {
@@ -523,7 +523,7 @@ get_bufferview_for_binding(struct zink_context *ctx, enum pipe_shader_type stage
}
ALWAYS_INLINE static struct zink_resource *
update_descriptor_state_ubo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res)
update_descriptor_state_ubo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot, struct zink_resource *res)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor;
@@ -549,7 +549,7 @@ update_descriptor_state_ubo(struct zink_context *ctx, enum pipe_shader_type shad
}
ALWAYS_INLINE static struct zink_resource *
update_descriptor_state_ssbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res)
update_descriptor_state_ssbo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot, struct zink_resource *res)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor;
@@ -568,7 +568,7 @@ update_descriptor_state_ssbo(struct zink_context *ctx, enum pipe_shader_type sha
}
ALWAYS_INLINE static struct zink_resource *
update_descriptor_state_sampler(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res)
update_descriptor_state_sampler(struct zink_context *ctx, gl_shader_stage shader, unsigned slot, struct zink_resource *res)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor;
@@ -582,7 +582,7 @@ update_descriptor_state_sampler(struct zink_context *ctx, enum pipe_shader_type
ctx->di.sampler_surfaces[shader][slot].is_buffer = true;
} else {
struct zink_surface *surface = get_imageview_for_binding(ctx, shader, type, slot);
ctx->di.textures[shader][slot].imageLayout = get_layout_for_binding(ctx, res, type, shader == PIPE_SHADER_COMPUTE);
ctx->di.textures[shader][slot].imageLayout = get_layout_for_binding(ctx, res, type, shader == MESA_SHADER_COMPUTE);
ctx->di.textures[shader][slot].imageView = surface->image_view;
if (!screen->have_D24_UNORM_S8_UINT &&
ctx->sampler_states[shader][slot] && ctx->sampler_states[shader][slot]->sampler_clamped) {
@@ -617,7 +617,7 @@ update_descriptor_state_sampler(struct zink_context *ctx, enum pipe_shader_type
}
ALWAYS_INLINE static struct zink_resource *
update_descriptor_state_image(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res)
update_descriptor_state_image(struct zink_context *ctx, gl_shader_stage shader, unsigned slot, struct zink_resource *res)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor;
@@ -653,10 +653,10 @@ update_descriptor_state_image(struct zink_context *ctx, enum pipe_shader_type sh
}
static void
update_nonseamless_shader_key(struct zink_context *ctx, enum pipe_shader_type pstage)
update_nonseamless_shader_key(struct zink_context *ctx, gl_shader_stage pstage)
{
uint32_t *mask;
if (pstage == PIPE_SHADER_COMPUTE)
if (pstage == MESA_SHADER_COMPUTE)
mask = &ctx->compute_pipeline_state.key.base.nonseamless_cube_mask;
else
mask = &ctx->gfx_pipeline_state.shader_keys.key[pstage].base.nonseamless_cube_mask;
@@ -669,7 +669,7 @@ update_nonseamless_shader_key(struct zink_context *ctx, enum pipe_shader_type ps
static void
zink_bind_sampler_states(struct pipe_context *pctx,
enum pipe_shader_type shader,
gl_shader_stage shader,
unsigned start_slot,
unsigned num_samplers,
void **samplers)
@@ -1238,7 +1238,7 @@ zink_set_scissor_states(struct pipe_context *pctx,
static void
zink_set_inlinable_constants(struct pipe_context *pctx,
enum pipe_shader_type shader,
gl_shader_stage shader,
uint num_values, uint32_t *values)
{
struct zink_context *ctx = (struct zink_context *)pctx;
@@ -1246,7 +1246,7 @@ zink_set_inlinable_constants(struct pipe_context *pctx,
uint32_t *inlinable_uniforms;
struct zink_shader_key *key = NULL;
if (shader == PIPE_SHADER_COMPUTE) {
if (shader == MESA_SHADER_COMPUTE) {
key = &ctx->compute_pipeline_state.key;
} else {
key = &ctx->gfx_pipeline_state.shader_keys.key[shader];
@@ -1262,41 +1262,41 @@ zink_set_inlinable_constants(struct pipe_context *pctx,
}
ALWAYS_INLINE static void
unbind_descriptor_stage(struct zink_resource *res, enum pipe_shader_type pstage)
unbind_descriptor_stage(struct zink_resource *res, gl_shader_stage pstage)
{
if (!res->sampler_binds[pstage] && !res->image_binds[pstage])
res->gfx_barrier &= ~zink_pipeline_flags_from_pipe_stage(pstage);
}
ALWAYS_INLINE static void
unbind_buffer_descriptor_stage(struct zink_resource *res, enum pipe_shader_type pstage)
unbind_buffer_descriptor_stage(struct zink_resource *res, gl_shader_stage pstage)
{
if (!res->ubo_bind_mask[pstage] && !res->ssbo_bind_mask[pstage])
unbind_descriptor_stage(res, pstage);
}
ALWAYS_INLINE static void
unbind_ubo(struct zink_context *ctx, struct zink_resource *res, enum pipe_shader_type pstage, unsigned slot)
unbind_ubo(struct zink_context *ctx, struct zink_resource *res, gl_shader_stage pstage, unsigned slot)
{
if (!res)
return;
res->ubo_bind_mask[pstage] &= ~BITFIELD_BIT(slot);
res->ubo_bind_count[pstage == PIPE_SHADER_COMPUTE]--;
res->ubo_bind_count[pstage == MESA_SHADER_COMPUTE]--;
unbind_buffer_descriptor_stage(res, pstage);
if (!res->ubo_bind_count[pstage == PIPE_SHADER_COMPUTE])
res->barrier_access[pstage == PIPE_SHADER_COMPUTE] &= ~VK_ACCESS_UNIFORM_READ_BIT;
update_res_bind_count(ctx, res, pstage == PIPE_SHADER_COMPUTE, true);
if (!res->ubo_bind_count[pstage == MESA_SHADER_COMPUTE])
res->barrier_access[pstage == MESA_SHADER_COMPUTE] &= ~VK_ACCESS_UNIFORM_READ_BIT;
update_res_bind_count(ctx, res, pstage == MESA_SHADER_COMPUTE, true);
}
static void
invalidate_inlined_uniforms(struct zink_context *ctx, enum pipe_shader_type pstage)
invalidate_inlined_uniforms(struct zink_context *ctx, gl_shader_stage pstage)
{
unsigned bit = BITFIELD_BIT(pstage);
if (!(ctx->inlinable_uniforms_valid_mask & bit))
return;
ctx->inlinable_uniforms_valid_mask &= ~bit;
ctx->dirty_shader_stages |= bit;
if (pstage == PIPE_SHADER_COMPUTE)
if (pstage == MESA_SHADER_COMPUTE)
return;
struct zink_shader_key *key = &ctx->gfx_pipeline_state.shader_keys.key[pstage];
@@ -1305,7 +1305,7 @@ invalidate_inlined_uniforms(struct zink_context *ctx, enum pipe_shader_type psta
static void
zink_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
gl_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *cb)
{
@@ -1326,11 +1326,11 @@ zink_set_constant_buffer(struct pipe_context *pctx,
if (new_res) {
if (new_res != res) {
unbind_ubo(ctx, res, shader, index);
new_res->ubo_bind_count[shader == PIPE_SHADER_COMPUTE]++;
new_res->ubo_bind_count[shader == MESA_SHADER_COMPUTE]++;
new_res->ubo_bind_mask[shader] |= BITFIELD_BIT(index);
new_res->gfx_barrier |= zink_pipeline_flags_from_pipe_stage(shader);
new_res->barrier_access[shader == PIPE_SHADER_COMPUTE] |= VK_ACCESS_UNIFORM_READ_BIT;
update_res_bind_count(ctx, new_res, shader == PIPE_SHADER_COMPUTE, false);
new_res->barrier_access[shader == MESA_SHADER_COMPUTE] |= VK_ACCESS_UNIFORM_READ_BIT;
update_res_bind_count(ctx, new_res, shader == MESA_SHADER_COMPUTE, false);
}
zink_batch_resource_usage_set(&ctx->batch, new_res, false);
zink_resource_buffer_barrier(ctx, new_res, VK_ACCESS_UNIFORM_READ_BIT,
@@ -1381,38 +1381,38 @@ zink_set_constant_buffer(struct pipe_context *pctx,
}
ALWAYS_INLINE static void
unbind_descriptor_reads(struct zink_resource *res, enum pipe_shader_type pstage)
unbind_descriptor_reads(struct zink_resource *res, gl_shader_stage pstage)
{
if (!res->sampler_binds[pstage] && !res->image_binds[pstage])
res->barrier_access[pstage == PIPE_SHADER_COMPUTE] &= ~VK_ACCESS_SHADER_READ_BIT;
res->barrier_access[pstage == MESA_SHADER_COMPUTE] &= ~VK_ACCESS_SHADER_READ_BIT;
}
ALWAYS_INLINE static void
unbind_buffer_descriptor_reads(struct zink_resource *res, enum pipe_shader_type pstage)
unbind_buffer_descriptor_reads(struct zink_resource *res, gl_shader_stage pstage)
{
if (!res->ssbo_bind_count[pstage == PIPE_SHADER_COMPUTE])
if (!res->ssbo_bind_count[pstage == MESA_SHADER_COMPUTE])
unbind_descriptor_reads(res, pstage);
}
ALWAYS_INLINE static void
unbind_ssbo(struct zink_context *ctx, struct zink_resource *res, enum pipe_shader_type pstage, unsigned slot, bool writable)
unbind_ssbo(struct zink_context *ctx, struct zink_resource *res, gl_shader_stage pstage, unsigned slot, bool writable)
{
if (!res)
return;
res->ssbo_bind_mask[pstage] &= ~BITFIELD_BIT(slot);
res->ssbo_bind_count[pstage == PIPE_SHADER_COMPUTE]--;
res->ssbo_bind_count[pstage == MESA_SHADER_COMPUTE]--;
unbind_buffer_descriptor_stage(res, pstage);
unbind_buffer_descriptor_reads(res, pstage);
update_res_bind_count(ctx, res, pstage == PIPE_SHADER_COMPUTE, true);
update_res_bind_count(ctx, res, pstage == MESA_SHADER_COMPUTE, true);
if (writable)
res->write_bind_count[pstage == PIPE_SHADER_COMPUTE]--;
if (!res->write_bind_count[pstage == PIPE_SHADER_COMPUTE])
res->barrier_access[pstage == PIPE_SHADER_COMPUTE] &= ~VK_ACCESS_SHADER_WRITE_BIT;
res->write_bind_count[pstage == MESA_SHADER_COMPUTE]--;
if (!res->write_bind_count[pstage == MESA_SHADER_COMPUTE])
res->barrier_access[pstage == MESA_SHADER_COMPUTE] &= ~VK_ACCESS_SHADER_WRITE_BIT;
}
static void
zink_set_shader_buffers(struct pipe_context *pctx,
enum pipe_shader_type p_stage,
gl_shader_stage p_stage,
unsigned start_slot, unsigned count,
const struct pipe_shader_buffer *buffers,
unsigned writable_bitmask)
@@ -1435,17 +1435,17 @@ zink_set_shader_buffers(struct pipe_context *pctx,
if (new_res != res) {
unbind_ssbo(ctx, res, p_stage, i, was_writable);
new_res->ssbo_bind_mask[p_stage] |= BITFIELD_BIT(i);
new_res->ssbo_bind_count[p_stage == PIPE_SHADER_COMPUTE]++;
new_res->ssbo_bind_count[p_stage == MESA_SHADER_COMPUTE]++;
new_res->gfx_barrier |= zink_pipeline_flags_from_pipe_stage(p_stage);
update_res_bind_count(ctx, new_res, p_stage == PIPE_SHADER_COMPUTE, false);
update_res_bind_count(ctx, new_res, p_stage == MESA_SHADER_COMPUTE, false);
}
VkAccessFlags access = VK_ACCESS_SHADER_READ_BIT;
if (ctx->writable_ssbos[p_stage] & BITFIELD64_BIT(start_slot + i)) {
new_res->write_bind_count[p_stage == PIPE_SHADER_COMPUTE]++;
new_res->write_bind_count[p_stage == MESA_SHADER_COMPUTE]++;
access |= VK_ACCESS_SHADER_WRITE_BIT;
}
pipe_resource_reference(&ssbo->buffer, &new_res->base.b);
new_res->barrier_access[p_stage == PIPE_SHADER_COMPUTE] |= access;
new_res->barrier_access[p_stage == MESA_SHADER_COMPUTE] |= access;
zink_batch_resource_usage_set(&ctx->batch, new_res, access & VK_ACCESS_SHADER_WRITE_BIT);
ssbo->buffer_offset = buffers[i].buffer_offset;
ssbo->buffer_size = MIN2(buffers[i].buffer_size, new_res->base.b.width0 - ssbo->buffer_offset);
@@ -1482,10 +1482,10 @@ update_binds_for_samplerviews(struct zink_context *ctx, struct zink_resource *re
{
VkImageLayout layout = get_layout_for_binding(ctx, res, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, is_compute);
if (is_compute) {
u_foreach_bit(slot, res->sampler_binds[PIPE_SHADER_COMPUTE]) {
if (ctx->di.textures[PIPE_SHADER_COMPUTE][slot].imageLayout != layout) {
update_descriptor_state_sampler(ctx, PIPE_SHADER_COMPUTE, slot, res);
zink_context_invalidate_descriptor_state(ctx, PIPE_SHADER_COMPUTE, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, slot, 1);
u_foreach_bit(slot, res->sampler_binds[MESA_SHADER_COMPUTE]) {
if (ctx->di.textures[MESA_SHADER_COMPUTE][slot].imageLayout != layout) {
update_descriptor_state_sampler(ctx, MESA_SHADER_COMPUTE, slot, res);
zink_context_invalidate_descriptor_state(ctx, MESA_SHADER_COMPUTE, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, slot, 1);
}
}
} else {
@@ -1531,10 +1531,10 @@ check_for_layout_update(struct zink_context *ctx, struct zink_resource *res, boo
}
static void
unbind_shader_image(struct zink_context *ctx, enum pipe_shader_type stage, unsigned slot)
unbind_shader_image(struct zink_context *ctx, gl_shader_stage stage, unsigned slot)
{
struct zink_image_view *image_view = &ctx->image_views[stage][slot];
bool is_compute = stage == PIPE_SHADER_COMPUTE;
bool is_compute = stage == MESA_SHADER_COMPUTE;
if (!image_view->base.resource)
return;
@@ -1542,7 +1542,7 @@ unbind_shader_image(struct zink_context *ctx, enum pipe_shader_type stage, unsig
res->image_binds[stage] &= ~BITFIELD_BIT(slot);
unbind_shader_image_counts(ctx, res, is_compute, image_view->base.access & PIPE_IMAGE_ACCESS_WRITE);
if (!res->write_bind_count[is_compute])
res->barrier_access[stage == PIPE_SHADER_COMPUTE] &= ~VK_ACCESS_SHADER_WRITE_BIT;
res->barrier_access[stage == MESA_SHADER_COMPUTE] &= ~VK_ACCESS_SHADER_WRITE_BIT;
if (image_view->base.resource->target == PIPE_BUFFER) {
unbind_buffer_descriptor_stage(res, stage);
@@ -1633,7 +1633,7 @@ create_image_surface(struct zink_context *ctx, const struct pipe_image_view *vie
static void
zink_set_shader_images(struct pipe_context *pctx,
enum pipe_shader_type p_stage,
gl_shader_stage p_stage,
unsigned start_slot, unsigned count,
unsigned unbind_num_trailing_slots,
const struct pipe_image_view *images)
@@ -1651,20 +1651,20 @@ zink_set_shader_images(struct pipe_context *pctx,
/* no refs */
VkAccessFlags access = 0;
if (images[i].access & PIPE_IMAGE_ACCESS_WRITE) {
res->write_bind_count[p_stage == PIPE_SHADER_COMPUTE]++;
res->write_bind_count[p_stage == MESA_SHADER_COMPUTE]++;
access |= VK_ACCESS_SHADER_WRITE_BIT;
}
if (images[i].access & PIPE_IMAGE_ACCESS_READ) {
access |= VK_ACCESS_SHADER_READ_BIT;
}
res->gfx_barrier |= zink_pipeline_flags_from_pipe_stage(p_stage);
res->barrier_access[p_stage == PIPE_SHADER_COMPUTE] |= access;
res->barrier_access[p_stage == MESA_SHADER_COMPUTE] |= access;
if (images[i].resource->target == PIPE_BUFFER) {
struct zink_buffer_view *bv = create_image_bufferview(ctx, &images[i]);
assert(bv);
if (image_view->buffer_view != bv) {
update_res_bind_count(ctx, res, p_stage == PIPE_SHADER_COMPUTE, false);
res->image_bind_count[p_stage == PIPE_SHADER_COMPUTE]++;
update_res_bind_count(ctx, res, p_stage == MESA_SHADER_COMPUTE, false);
res->image_bind_count[p_stage == MESA_SHADER_COMPUTE]++;
unbind_shader_image(ctx, p_stage, start_slot + i);
}
image_view->buffer_view = bv;
@@ -1672,15 +1672,15 @@ zink_set_shader_images(struct pipe_context *pctx,
zink_resource_buffer_barrier(ctx, res, access,
res->gfx_barrier);
} else {
struct zink_surface *surface = create_image_surface(ctx, &images[i], p_stage == PIPE_SHADER_COMPUTE);
struct zink_surface *surface = create_image_surface(ctx, &images[i], p_stage == MESA_SHADER_COMPUTE);
assert(surface);
if (image_view->surface != surface) {
res->image_bind_count[p_stage == PIPE_SHADER_COMPUTE]++;
update_res_bind_count(ctx, res, p_stage == PIPE_SHADER_COMPUTE, false);
res->image_bind_count[p_stage == MESA_SHADER_COMPUTE]++;
update_res_bind_count(ctx, res, p_stage == MESA_SHADER_COMPUTE, false);
unbind_shader_image(ctx, p_stage, start_slot + i);
}
image_view->surface = surface;
finalize_image_bind(ctx, res, p_stage == PIPE_SHADER_COMPUTE);
finalize_image_bind(ctx, res, p_stage == MESA_SHADER_COMPUTE);
zink_batch_usage_set(&image_view->surface->batch_uses, ctx->batch.state);
}
memcpy(&image_view->base, images + i, sizeof(struct pipe_image_view));
@@ -1720,15 +1720,15 @@ check_samplerview_for_batch_ref(struct zink_context *ctx, struct zink_sampler_vi
}
ALWAYS_INLINE static void
unbind_samplerview(struct zink_context *ctx, enum pipe_shader_type stage, unsigned slot)
unbind_samplerview(struct zink_context *ctx, gl_shader_stage stage, unsigned slot)
{
struct zink_sampler_view *sv = zink_sampler_view(ctx->sampler_views[stage][slot]);
if (!sv || !sv->base.texture)
return;
struct zink_resource *res = zink_resource(sv->base.texture);
res->sampler_bind_count[stage == PIPE_SHADER_COMPUTE]--;
res->sampler_bind_count[stage == MESA_SHADER_COMPUTE]--;
check_samplerview_for_batch_ref(ctx, sv);
update_res_bind_count(ctx, res, stage == PIPE_SHADER_COMPUTE, true);
update_res_bind_count(ctx, res, stage == MESA_SHADER_COMPUTE, true);
res->sampler_binds[stage] &= ~BITFIELD_BIT(slot);
if (res->obj->is_buffer) {
unbind_buffer_descriptor_stage(res, stage);
@@ -1741,7 +1741,7 @@ unbind_samplerview(struct zink_context *ctx, enum pipe_shader_type stage, unsign
static void
zink_set_sampler_views(struct pipe_context *pctx,
enum pipe_shader_type shader_type,
gl_shader_stage shader_type,
unsigned start_slot,
unsigned num_views,
unsigned unbind_num_trailing_slots,
@@ -1764,10 +1764,10 @@ zink_set_sampler_views(struct pipe_context *pctx,
if (!a || zink_resource(a->base.texture) != res) {
if (a)
unbind_samplerview(ctx, shader_type, start_slot + i);
update_res_bind_count(ctx, res, shader_type == PIPE_SHADER_COMPUTE, false);
res->sampler_bind_count[shader_type == PIPE_SHADER_COMPUTE]++;
update_res_bind_count(ctx, res, shader_type == MESA_SHADER_COMPUTE, false);
res->sampler_bind_count[shader_type == MESA_SHADER_COMPUTE]++;
res->gfx_barrier |= zink_pipeline_flags_from_pipe_stage(shader_type);
res->barrier_access[shader_type == PIPE_SHADER_COMPUTE] |= VK_ACCESS_SHADER_READ_BIT;
res->barrier_access[shader_type == MESA_SHADER_COMPUTE] |= VK_ACCESS_SHADER_READ_BIT;
} else if (a != b) {
check_samplerview_for_batch_ref(ctx, a);
}
@@ -1801,13 +1801,13 @@ zink_set_sampler_views(struct pipe_context *pctx,
update |= iv != b->image_view->image_view;
} else if (a != b)
update = true;
if (shader_type == PIPE_SHADER_COMPUTE)
if (shader_type == MESA_SHADER_COMPUTE)
flush_pending_clears(ctx, res);
if (b->cube_array) {
ctx->di.cubes[shader_type] |= BITFIELD_BIT(start_slot + i);
zink_batch_usage_set(&b->cube_array->batch_uses, ctx->batch.state);
}
check_for_layout_update(ctx, res, shader_type == PIPE_SHADER_COMPUTE);
check_for_layout_update(ctx, res, shader_type == MESA_SHADER_COMPUTE);
zink_batch_usage_set(&b->image_view->batch_uses, ctx->batch.state);
if (!a)
update = true;
@@ -2158,8 +2158,8 @@ void
zink_update_fbfetch(struct zink_context *ctx)
{
const bool had_fbfetch = ctx->di.fbfetch.imageLayout == VK_IMAGE_LAYOUT_GENERAL;
if (!ctx->gfx_stages[PIPE_SHADER_FRAGMENT] ||
!ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) {
if (!ctx->gfx_stages[MESA_SHADER_FRAGMENT] ||
!ctx->gfx_stages[MESA_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) {
if (!had_fbfetch)
return;
ctx->rp_changed = true;
@@ -2168,7 +2168,7 @@ zink_update_fbfetch(struct zink_context *ctx)
ctx->di.fbfetch.imageView = zink_screen(ctx->base.screen)->info.rb2_feats.nullDescriptor ?
VK_NULL_HANDLE :
zink_csurface(ctx->dummy_surface[0])->image_view;
zink_context_invalidate_descriptor_state(ctx, PIPE_SHADER_FRAGMENT, ZINK_DESCRIPTOR_TYPE_UBO, 0, 1);
zink_context_invalidate_descriptor_state(ctx, MESA_SHADER_FRAGMENT, ZINK_DESCRIPTOR_TYPE_UBO, 0, 1);
return;
}
@@ -2187,7 +2187,7 @@ zink_update_fbfetch(struct zink_context *ctx)
}
ctx->di.fbfetch.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
if (changed) {
zink_context_invalidate_descriptor_state(ctx, PIPE_SHADER_FRAGMENT, ZINK_DESCRIPTOR_TYPE_UBO, 0, 1);
zink_context_invalidate_descriptor_state(ctx, MESA_SHADER_FRAGMENT, ZINK_DESCRIPTOR_TYPE_UBO, 0, 1);
if (!had_fbfetch) {
ctx->rp_changed = true;
zink_batch_no_rp(ctx);
@@ -2442,7 +2442,7 @@ ALWAYS_INLINE static void
update_res_sampler_layouts(struct zink_context *ctx, struct zink_resource *res)
{
unsigned find = res->sampler_bind_count[0];
for (unsigned i = 0; find && i < PIPE_SHADER_COMPUTE; i++) {
for (unsigned i = 0; find && i < MESA_SHADER_COMPUTE; i++) {
u_foreach_bit(slot, res->sampler_binds[i]) {
/* only set layout, skip rest of update */
if (ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i][slot] == res)
@@ -2572,7 +2572,7 @@ sync_flush(struct zink_context *ctx, struct zink_batch_state *bs)
}
static inline VkAccessFlags
get_access_flags_for_binding(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type stage, unsigned idx)
get_access_flags_for_binding(struct zink_context *ctx, enum zink_descriptor_type type, gl_shader_stage stage, unsigned idx)
{
VkAccessFlags flags = 0;
switch (type) {
@@ -2602,7 +2602,7 @@ get_access_flags_for_binding(struct zink_context *ctx, enum zink_descriptor_type
}
static void
update_resource_refs_for_stage(struct zink_context *ctx, enum pipe_shader_type stage)
update_resource_refs_for_stage(struct zink_context *ctx, gl_shader_stage stage)
{
struct zink_batch *batch = &ctx->batch;
unsigned max_slot[] = {
@@ -2655,7 +2655,7 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
{
struct zink_batch *batch = &ctx->batch;
if (compute) {
update_resource_refs_for_stage(ctx, PIPE_SHADER_COMPUTE);
update_resource_refs_for_stage(ctx, MESA_SHADER_COMPUTE);
if (ctx->curr_compute)
zink_batch_reference_program(batch, &ctx->curr_compute->base);
} else {
@@ -3397,15 +3397,15 @@ zink_resource_needs_barrier(struct zink_resource *res, VkImageLayout layout, VkA
}
VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
zink_shader_stage(gl_shader_stage type)
{
VkShaderStageFlagBits stages[] = {
[PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
[PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
[PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
[PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
[PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
[PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
[MESA_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
[MESA_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
[MESA_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
[MESA_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
[MESA_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
[MESA_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
};
return stages[type];
}
@@ -3822,7 +3822,7 @@ zink_rebind_framebuffer(struct zink_context *ctx, struct zink_resource *res)
}
ALWAYS_INLINE static struct zink_resource *
rebind_ubo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot)
rebind_ubo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot)
{
struct zink_resource *res = update_descriptor_state_ubo(ctx, shader, slot,
ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][shader][slot]);
@@ -3831,7 +3831,7 @@ rebind_ubo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot
}
ALWAYS_INLINE static struct zink_resource *
rebind_ssbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot)
rebind_ssbo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot)
{
const struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][slot];
struct zink_resource *res = zink_resource(ssbo->buffer);
@@ -3845,7 +3845,7 @@ rebind_ssbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slo
}
ALWAYS_INLINE static struct zink_resource *
rebind_tbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot)
rebind_tbo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot)
{
struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][slot]);
if (!sampler_view || sampler_view->base.texture->target != PIPE_BUFFER)
@@ -3863,7 +3863,7 @@ rebind_tbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot
}
ALWAYS_INLINE static struct zink_resource *
rebind_ibo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot)
rebind_ibo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot)
{
struct zink_image_view *image_view = &ctx->image_views[shader][slot];
struct zink_resource *res = zink_resource(image_view->base.resource);
@@ -4274,13 +4274,13 @@ rebind_image(struct zink_context *ctx, struct zink_resource *res)
}
}
}
if (!res->image_bind_count[i == PIPE_SHADER_COMPUTE])
if (!res->image_bind_count[i == MESA_SHADER_COMPUTE])
continue;
for (unsigned j = 0; j < ctx->di.num_images[i]; j++) {
if (zink_resource(ctx->image_views[i][j].base.resource) == res) {
zink_context_invalidate_descriptor_state(ctx, i, ZINK_DESCRIPTOR_TYPE_IMAGE, j, 1);
update_descriptor_state_image(ctx, i, j, res);
_mesa_set_add(ctx->need_barriers[i == PIPE_SHADER_COMPUTE], res);
_mesa_set_add(ctx->need_barriers[i == MESA_SHADER_COMPUTE], res);
}
}
}
@@ -4307,7 +4307,7 @@ zink_rebind_all_buffers(struct zink_context *ctx)
if (ctx->num_so_targets)
zink_resource_buffer_barrier(ctx, zink_resource(ctx->dummy_xfb_buffer),
VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
for (unsigned shader = PIPE_SHADER_VERTEX; shader < MESA_SHADER_STAGES; shader++) {
for (unsigned shader = MESA_SHADER_VERTEX; shader < MESA_SHADER_STAGES; shader++) {
for (unsigned slot = 0; slot < ctx->di.num_ubos[shader]; slot++) {
struct zink_resource *res = rebind_ubo(ctx, shader, slot);
if (res)
@@ -4356,10 +4356,10 @@ zink_rebind_all_images(struct zink_context *ctx)
continue;
if (ctx->image_views[i][j].surface->obj != res->obj) {
zink_surface_reference(zink_screen(ctx->base.screen), &image_view->surface, NULL);
image_view->surface = create_image_surface(ctx, &image_view->base, i == PIPE_SHADER_COMPUTE);
image_view->surface = create_image_surface(ctx, &image_view->base, i == MESA_SHADER_COMPUTE);
zink_context_invalidate_descriptor_state(ctx, i, ZINK_DESCRIPTOR_TYPE_IMAGE, j, 1);
update_descriptor_state_image(ctx, i, j, res);
_mesa_set_add(ctx->need_barriers[i == PIPE_SHADER_COMPUTE], res);
_mesa_set_add(ctx->need_barriers[i == MESA_SHADER_COMPUTE], res);
}
}
}
@ -4548,12 +4548,12 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
ctx->gfx_pipeline_state.shader_keys.last_vertex.key.vs_base.last_vertex_stage = true;
ctx->last_vertex_stage_dirty = true;
ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_TESS_CTRL].key.tcs.patch_vertices = 1;
ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_VERTEX].size = sizeof(struct zink_vs_key_base);
ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_TESS_EVAL].size = sizeof(struct zink_vs_key_base);
ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_TESS_CTRL].size = sizeof(struct zink_tcs_key);
ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_GEOMETRY].size = sizeof(struct zink_vs_key_base);
ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_FRAGMENT].size = sizeof(struct zink_fs_key);
ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_TESS_CTRL].key.tcs.patch_vertices = 1;
ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_VERTEX].size = sizeof(struct zink_vs_key_base);
ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_TESS_EVAL].size = sizeof(struct zink_vs_key_base);
ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_TESS_CTRL].size = sizeof(struct zink_tcs_key);
ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_GEOMETRY].size = sizeof(struct zink_vs_key_base);
ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_FRAGMENT].size = sizeof(struct zink_fs_key);
_mesa_hash_table_init(&ctx->compute_program_cache, ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
_mesa_hash_table_init(&ctx->framebuffer_cache, ctx, hash_framebuffer_imageless, equals_framebuffer_imageless);
if (!zink_init_render_pass(ctx))

View file

@ -67,9 +67,9 @@ zink_fb_clear_enabled(const struct zink_context *ctx, unsigned idx)
static inline uint32_t
zink_program_cache_stages(uint32_t stages_present)
{
return (stages_present & ((1 << PIPE_SHADER_TESS_CTRL) |
(1 << PIPE_SHADER_TESS_EVAL) |
(1 << PIPE_SHADER_GEOMETRY))) >> 1;
return (stages_present & ((1 << MESA_SHADER_TESS_CTRL) |
(1 << MESA_SHADER_TESS_EVAL) |
(1 << MESA_SHADER_GEOMETRY))) >> 1;
}
void
@ -118,20 +118,20 @@ void
zink_update_vk_sample_locations(struct zink_context *ctx);
static inline VkPipelineStageFlags
zink_pipeline_flags_from_pipe_stage(enum pipe_shader_type pstage)
zink_pipeline_flags_from_pipe_stage(gl_shader_stage pstage)
{
switch (pstage) {
case PIPE_SHADER_VERTEX:
case MESA_SHADER_VERTEX:
return VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
case PIPE_SHADER_FRAGMENT:
case MESA_SHADER_FRAGMENT:
return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
case PIPE_SHADER_GEOMETRY:
case MESA_SHADER_GEOMETRY:
return VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
case PIPE_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_CTRL:
return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
case PIPE_SHADER_TESS_EVAL:
case MESA_SHADER_TESS_EVAL:
return VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
case PIPE_SHADER_COMPUTE:
case MESA_SHADER_COMPUTE:
return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
default:
unreachable("unknown shader stage");
@ -160,7 +160,7 @@ VkPipelineStageFlags
zink_pipeline_flags_from_stage(VkShaderStageFlagBits stage);
VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type);
zink_shader_stage(gl_shader_stage type);
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags);

View file

@ -25,10 +25,6 @@
* Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
*/
#include "tgsi/tgsi_from_mesa.h"
#include "zink_context.h"
#include "zink_descriptors.h"
#include "zink_program.h"
@ -209,7 +205,7 @@ zink_descriptor_util_pool_key_get(struct zink_context *ctx, enum zink_descriptor
static void
init_push_binding(VkDescriptorSetLayoutBinding *binding, unsigned i, VkDescriptorType type)
{
binding->binding = tgsi_processor_to_shader_stage(i);
binding->binding = i;
binding->descriptorType = type;
binding->descriptorCount = 1;
binding->stageFlags = zink_shader_stage(i);
@ -249,7 +245,7 @@ zink_descriptor_util_push_layouts_get(struct zink_context *ctx, struct zink_desc
VkDescriptorSetLayoutBinding compute_binding;
enum zink_descriptor_type dsl_type;
VkDescriptorType vktype = get_push_types(screen, &dsl_type);
init_push_binding(&compute_binding, PIPE_SHADER_COMPUTE, vktype);
init_push_binding(&compute_binding, MESA_SHADER_COMPUTE, vktype);
dsls[0] = create_gfx_layout(ctx, &layout_keys[0], false);
dsls[1] = create_layout(ctx, dsl_type, &compute_binding, 1, &layout_keys[1]);
return dsls[0] && dsls[1];
@ -304,7 +300,7 @@ init_template_entry(struct zink_shader *shader, enum zink_descriptor_type type,
unsigned idx, VkDescriptorUpdateTemplateEntry *entry, unsigned *entry_idx)
{
int index = shader->bindings[type][idx].index;
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
gl_shader_stage stage = shader->nir->info.stage;
entry->dstArrayElement = 0;
entry->dstBinding = shader->bindings[type][idx].binding;
entry->descriptorCount = shader->bindings[type][idx].size;
@ -400,7 +396,7 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
else
stages = ((struct zink_gfx_program*)pg)->shaders;
if (!pg->is_compute && stages[PIPE_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) {
if (!pg->is_compute && stages[MESA_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) {
zink_descriptor_util_init_fbfetch(ctx);
push_count = 1;
pg->dd.fbfetch = true;
@ -415,7 +411,7 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
if (!shader)
continue;
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
gl_shader_stage stage = shader->nir->info.stage;
VkShaderStageFlagBits stage_flags = zink_shader_stage(stage);
for (int j = 0; j < ZINK_DESCRIPTOR_TYPES; j++) {
unsigned desc_set = screen->desc_set_id[j] - 1;
@ -839,14 +835,14 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
}
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
zink_context_invalidate_descriptor_state(struct zink_context *ctx, gl_shader_stage shader, enum zink_descriptor_type type, unsigned start, unsigned count)
{
if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start)
ctx->dd.push_state_changed[shader == PIPE_SHADER_COMPUTE] = true;
ctx->dd.push_state_changed[shader == MESA_SHADER_COMPUTE] = true;
else {
if (zink_screen(ctx->base.screen)->compact_descriptors && type > ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW)
type -= ZINK_DESCRIPTOR_COMPACT;
ctx->dd.state_changed[shader == PIPE_SHADER_COMPUTE] |= BITFIELD_BIT(type);
ctx->dd.state_changed[shader == MESA_SHADER_COMPUTE] |= BITFIELD_BIT(type);
}
}
@ -916,7 +912,7 @@ zink_batch_descriptor_init(struct zink_screen *screen, struct zink_batch_state *
static void
init_push_template_entry(VkDescriptorUpdateTemplateEntry *entry, unsigned i)
{
entry->dstBinding = tgsi_processor_to_shader_stage(i);
entry->dstBinding = i;
entry->descriptorCount = 1;
entry->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
entry->offset = offsetof(struct zink_context, di.ubos[i][0]);
@ -930,7 +926,7 @@ zink_descriptors_init(struct zink_context *ctx)
VkDescriptorUpdateTemplateEntry *entry = &ctx->dd.push_entries[i];
init_push_template_entry(entry, i);
}
init_push_template_entry(&ctx->dd.compute_push_entry, PIPE_SHADER_COMPUTE);
init_push_template_entry(&ctx->dd.compute_push_entry, MESA_SHADER_COMPUTE);
VkDescriptorUpdateTemplateEntry *entry = &ctx->dd.push_entries[ZINK_GFX_SHADER_COUNT]; //fbfetch
entry->dstBinding = ZINK_FBFETCH_BINDING;
entry->descriptorCount = 1;

View file

@ -155,7 +155,7 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute);
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned, unsigned);
zink_context_invalidate_descriptor_state(struct zink_context *ctx, gl_shader_stage shader, enum zink_descriptor_type type, unsigned, unsigned);
void
zink_batch_descriptor_deinit(struct zink_screen *screen, struct zink_batch_state *bs);

View file

@ -179,14 +179,14 @@ static void
update_gfx_program(struct zink_context *ctx)
{
if (ctx->last_vertex_stage_dirty) {
enum pipe_shader_type pstage = pipe_shader_type_from_mesa(ctx->last_vertex_stage->nir->info.stage);
gl_shader_stage pstage = ctx->last_vertex_stage->nir->info.stage;
ctx->dirty_shader_stages |= BITFIELD_BIT(pstage);
memcpy(&ctx->gfx_pipeline_state.shader_keys.key[pstage].key.vs_base,
&ctx->gfx_pipeline_state.shader_keys.last_vertex.key.vs_base,
sizeof(struct zink_vs_key_base));
ctx->last_vertex_stage_dirty = false;
}
unsigned bits = BITFIELD_MASK(PIPE_SHADER_COMPUTE);
unsigned bits = BITFIELD_MASK(MESA_SHADER_COMPUTE);
if (ctx->gfx_dirty) {
struct zink_gfx_program *prog = NULL;
@ -777,7 +777,7 @@ zink_draw(struct pipe_context *pctx,
offsetof(struct zink_gfx_push_constant, draw_mode_is_indexed), sizeof(unsigned),
&draw_mode_is_indexed);
}
if (ctx->curr_program->shaders[PIPE_SHADER_TESS_CTRL] && ctx->curr_program->shaders[PIPE_SHADER_TESS_CTRL]->is_generated) {
if (ctx->curr_program->shaders[MESA_SHADER_TESS_CTRL] && ctx->curr_program->shaders[MESA_SHADER_TESS_CTRL]->is_generated) {
VKCTX(CmdPushConstants)(batch->state->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
offsetof(struct zink_gfx_push_constant, default_inner_level), sizeof(float) * 6,
&ctx->tess_levels[0]);
@ -970,10 +970,10 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
zink_update_descriptor_refs(ctx, true);
zink_batch_reference_program(&ctx->batch, &ctx->curr_compute->base);
}
if (ctx->dirty_shader_stages & BITFIELD_BIT(PIPE_SHADER_COMPUTE)) {
if (ctx->dirty_shader_stages & BITFIELD_BIT(MESA_SHADER_COMPUTE)) {
/* update inlinable constants */
zink_update_compute_program(ctx);
ctx->dirty_shader_stages &= ~BITFIELD_BIT(PIPE_SHADER_COMPUTE);
ctx->dirty_shader_stages &= ~BITFIELD_BIT(MESA_SHADER_COMPUTE);
}
VkPipeline pipeline = zink_get_compute_pipeline(screen, ctx->curr_compute,
@ -1092,22 +1092,22 @@ static uint32_t
hash_gfx_program(const void *key)
{
const struct zink_shader **shaders = (const struct zink_shader**)key;
uint32_t base_hash = shaders[PIPE_SHADER_VERTEX]->hash ^ shaders[PIPE_SHADER_FRAGMENT]->hash;
uint32_t base_hash = shaders[MESA_SHADER_VERTEX]->hash ^ shaders[MESA_SHADER_FRAGMENT]->hash;
if (STAGE_MASK == STAGE_BASE) //VS+FS
return base_hash;
if (STAGE_MASK == STAGE_BASE_GS) //VS+GS+FS
return base_hash ^ shaders[PIPE_SHADER_GEOMETRY]->hash;
return base_hash ^ shaders[MESA_SHADER_GEOMETRY]->hash;
/*VS+TCS+FS isn't a thing */
/*VS+TCS+GS+FS isn't a thing */
if (STAGE_MASK == STAGE_BASE_TES) //VS+TES+FS
return base_hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
return base_hash ^ shaders[MESA_SHADER_TESS_EVAL]->hash;
if (STAGE_MASK == STAGE_BASE_TES_GS) //VS+TES+GS+FS
return base_hash ^ shaders[PIPE_SHADER_GEOMETRY]->hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
return base_hash ^ shaders[MESA_SHADER_GEOMETRY]->hash ^ shaders[MESA_SHADER_TESS_EVAL]->hash;
if (STAGE_MASK == STAGE_BASE_TCS_TES) //VS+TCS+TES+FS
return base_hash ^ shaders[PIPE_SHADER_TESS_CTRL]->hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
return base_hash ^ shaders[MESA_SHADER_TESS_CTRL]->hash ^ shaders[MESA_SHADER_TESS_EVAL]->hash;
/* all stages */
return base_hash ^ shaders[PIPE_SHADER_GEOMETRY]->hash ^ shaders[PIPE_SHADER_TESS_CTRL]->hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
return base_hash ^ shaders[MESA_SHADER_GEOMETRY]->hash ^ shaders[MESA_SHADER_TESS_CTRL]->hash ^ shaders[MESA_SHADER_TESS_EVAL]->hash;
}
template <unsigned STAGE_MASK>
@ -1123,11 +1123,11 @@ equals_gfx_program(const void *a, const void *b)
/*VS+TCS+FS isn't a thing */
/*VS+TCS+GS+FS isn't a thing */
if (STAGE_MASK == STAGE_BASE_TES) //VS+TES+FS
return sa[PIPE_SHADER_TESS_EVAL] == sb[PIPE_SHADER_TESS_EVAL] && !memcmp(a, b, sizeof(void*) * 2);
return sa[MESA_SHADER_TESS_EVAL] == sb[MESA_SHADER_TESS_EVAL] && !memcmp(a, b, sizeof(void*) * 2);
if (STAGE_MASK == STAGE_BASE_TES_GS) //VS+TES+GS+FS
return sa[PIPE_SHADER_TESS_EVAL] == sb[PIPE_SHADER_TESS_EVAL] && !memcmp(a, b, sizeof(void*) * 3);
return sa[MESA_SHADER_TESS_EVAL] == sb[MESA_SHADER_TESS_EVAL] && !memcmp(a, b, sizeof(void*) * 3);
if (STAGE_MASK == STAGE_BASE_TCS_TES) //VS+TCS+TES+FS
return !memcmp(&sa[PIPE_SHADER_TESS_CTRL], &sb[PIPE_SHADER_TESS_CTRL], sizeof(void*) * 2) &&
return !memcmp(&sa[MESA_SHADER_TESS_CTRL], &sb[MESA_SHADER_TESS_CTRL], sizeof(void*) * 2) &&
!memcmp(a, b, sizeof(void*) * 2);
/* all stages */

View file

@ -274,12 +274,12 @@ zink_create_gfx_pipeline(struct zink_screen *screen,
break;
default: break;
}
if (prog->nir[PIPE_SHADER_TESS_EVAL]) {
check_warn |= !prog->nir[PIPE_SHADER_TESS_EVAL]->info.tess.point_mode &&
prog->nir[PIPE_SHADER_TESS_EVAL]->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES;
if (prog->nir[MESA_SHADER_TESS_EVAL]) {
check_warn |= !prog->nir[MESA_SHADER_TESS_EVAL]->info.tess.point_mode &&
prog->nir[MESA_SHADER_TESS_EVAL]->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES;
}
if (prog->nir[PIPE_SHADER_GEOMETRY]) {
switch (prog->nir[PIPE_SHADER_GEOMETRY]->info.gs.output_primitive) {
if (prog->nir[MESA_SHADER_GEOMETRY]) {
switch (prog->nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive) {
case SHADER_PRIM_LINES:
case SHADER_PRIM_LINE_LOOP:
case SHADER_PRIM_LINE_STRIP:
@ -346,7 +346,7 @@ zink_create_gfx_pipeline(struct zink_screen *screen,
VkPipelineTessellationStateCreateInfo tci = {0};
VkPipelineTessellationDomainOriginStateCreateInfo tdci = {0};
if (prog->shaders[PIPE_SHADER_TESS_CTRL] && prog->shaders[PIPE_SHADER_TESS_EVAL]) {
if (prog->shaders[MESA_SHADER_TESS_CTRL] && prog->shaders[MESA_SHADER_TESS_EVAL]) {
tci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
tci.patchControlPoints = state->dyn_state2.vertices_per_patch;
pci.pTessellationState = &tci;
@ -688,12 +688,12 @@ zink_create_gfx_pipeline_library(struct zink_screen *screen, struct zink_gfx_pro
rast_line_state.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
bool check_warn = line;
if (prog->nir[PIPE_SHADER_TESS_EVAL]) {
check_warn |= !prog->nir[PIPE_SHADER_TESS_EVAL]->info.tess.point_mode &&
prog->nir[PIPE_SHADER_TESS_EVAL]->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES;
if (prog->nir[MESA_SHADER_TESS_EVAL]) {
check_warn |= !prog->nir[MESA_SHADER_TESS_EVAL]->info.tess.point_mode &&
prog->nir[MESA_SHADER_TESS_EVAL]->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES;
}
if (prog->nir[PIPE_SHADER_GEOMETRY]) {
switch (prog->nir[PIPE_SHADER_GEOMETRY]->info.gs.output_primitive) {
if (prog->nir[MESA_SHADER_GEOMETRY]) {
switch (prog->nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive) {
case SHADER_PRIM_LINES:
case SHADER_PRIM_LINE_LOOP:
case SHADER_PRIM_LINE_STRIP:
@ -754,7 +754,7 @@ zink_create_gfx_pipeline_library(struct zink_screen *screen, struct zink_gfx_pro
VkPipelineTessellationStateCreateInfo tci = {0};
VkPipelineTessellationDomainOriginStateCreateInfo tdci = {0};
if (prog->shaders[PIPE_SHADER_TESS_CTRL] && prog->shaders[PIPE_SHADER_TESS_EVAL]) {
if (prog->shaders[MESA_SHADER_TESS_CTRL] && prog->shaders[MESA_SHADER_TESS_EVAL]) {
tci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
tci.patchControlPoints = 3; //this is a wild guess; pray for extendedDynamicState2PatchControlPoints
pci.pTessellationState = &tci;

View file

@ -37,7 +37,6 @@
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "tgsi/tgsi_from_mesa.h"
/* for pipeline cache */
#define XXH_INLINE_ALL
@ -93,19 +92,18 @@ get_shader_module_for_stage(struct zink_context *ctx, struct zink_screen *screen
struct zink_gfx_pipeline_state *state)
{
gl_shader_stage stage = zs->nir->info.stage;
enum pipe_shader_type pstage = pipe_shader_type_from_mesa(stage);
VkShaderModule mod;
struct zink_shader_module *zm = NULL;
unsigned inline_size = 0, nonseamless_size = 0;
struct zink_shader_key *key = &state->shader_keys.key[pstage];
struct zink_shader_key *key = &state->shader_keys.key[stage];
bool ignore_key_size = false;
if (pstage == PIPE_SHADER_TESS_CTRL && !zs->is_generated) {
if (stage == MESA_SHADER_TESS_CTRL && !zs->is_generated) {
/* non-generated tcs won't use the shader key */
ignore_key_size = true;
}
if (ctx && zs->nir->info.num_inlinable_uniforms &&
ctx->inlinable_uniforms_valid_mask & BITFIELD64_BIT(pstage)) {
if (zs->can_inline && (screen->is_cpu || prog->inlined_variant_count[pstage] < ZINK_MAX_INLINED_VARIANTS))
ctx->inlinable_uniforms_valid_mask & BITFIELD64_BIT(stage)) {
if (zs->can_inline && (screen->is_cpu || prog->inlined_variant_count[stage] < ZINK_MAX_INLINED_VARIANTS))
inline_size = zs->nir->info.num_inlinable_uniforms;
else
key->inline_uniforms = false;
@ -114,7 +112,7 @@ get_shader_module_for_stage(struct zink_context *ctx, struct zink_screen *screen
nonseamless_size = sizeof(uint32_t);
struct zink_shader_module *iter, *next;
LIST_FOR_EACH_ENTRY_SAFE(iter, next, &prog->shader_cache[pstage][!!nonseamless_size][!!inline_size], list) {
LIST_FOR_EACH_ENTRY_SAFE(iter, next, &prog->shader_cache[stage][!!nonseamless_size][!!inline_size], list) {
if (!shader_key_matches(iter, ignore_key_size, key, inline_size))
continue;
list_delinit(&iter->list);
@ -127,8 +125,8 @@ get_shader_module_for_stage(struct zink_context *ctx, struct zink_screen *screen
if (!zm) {
return NULL;
}
unsigned patch_vertices = state->shader_keys.key[PIPE_SHADER_TESS_CTRL ].key.tcs.patch_vertices;
if (pstage == PIPE_SHADER_TESS_CTRL && zs->is_generated && zs->spirv) {
unsigned patch_vertices = state->shader_keys.key[MESA_SHADER_TESS_CTRL ].key.tcs.patch_vertices;
if (stage == MESA_SHADER_TESS_CTRL && zs->is_generated && zs->spirv) {
assert(ctx); //TODO async
mod = zink_shader_tcs_compile(screen, zs, patch_vertices);
} else {
@ -155,15 +153,15 @@ get_shader_module_for_stage(struct zink_context *ctx, struct zink_screen *screen
zm->has_nonseamless = !!nonseamless_size;
if (inline_size)
memcpy(zm->key + key->size + nonseamless_size, key->base.inlined_uniform_values, inline_size * sizeof(uint32_t));
if (pstage == PIPE_SHADER_TESS_CTRL && zs->is_generated)
if (stage == MESA_SHADER_TESS_CTRL && zs->is_generated)
zm->hash = patch_vertices;
else
zm->hash = shader_module_hash(zm);
zm->default_variant = !inline_size && list_is_empty(&prog->shader_cache[pstage][0][0]);
zm->default_variant = !inline_size && list_is_empty(&prog->shader_cache[stage][0][0]);
if (inline_size)
prog->inlined_variant_count[pstage]++;
prog->inlined_variant_count[stage]++;
}
list_add(&zm->list, &prog->shader_cache[pstage][!!nonseamless_size][!!inline_size]);
list_add(&zm->list, &prog->shader_cache[stage][!!nonseamless_size][!!inline_size]);
return zm;
}
@ -192,7 +190,7 @@ update_gfx_shader_modules(struct zink_context *ctx,
{
bool hash_changed = false;
bool default_variants = true;
bool first = !prog->modules[PIPE_SHADER_VERTEX];
bool first = !prog->modules[MESA_SHADER_VERTEX];
uint32_t variant_hash = prog->last_variant_hash;
u_foreach_bit(pstage, mask) {
assert(prog->shaders[pstage]);
@ -361,7 +359,7 @@ update_cs_shader_module(struct zink_context *ctx, struct zink_compute_program *c
struct zink_shader_key *key = &ctx->compute_pipeline_state.key;
if (ctx && zs->nir->info.num_inlinable_uniforms &&
ctx->inlinable_uniforms_valid_mask & BITFIELD64_BIT(PIPE_SHADER_COMPUTE)) {
ctx->inlinable_uniforms_valid_mask & BITFIELD64_BIT(MESA_SHADER_COMPUTE)) {
if (screen->is_cpu || comp->inlined_variant_count < ZINK_MAX_INLINED_VARIANTS)
inline_size = zs->nir->info.num_inlinable_uniforms;
else
@ -472,7 +470,7 @@ assign_io(struct zink_gfx_program *prog, struct zink_shader *stages[ZINK_GFX_SHA
/* build array in pipeline order */
for (unsigned i = 0; i < ZINK_GFX_SHADER_COUNT; i++)
shaders[tgsi_processor_to_shader_stage(i)] = stages[i];
shaders[i] = stages[i];
for (unsigned i = 0; i < MESA_SHADER_FRAGMENT;) {
nir_shader *producer = shaders[i]->nir;
@ -514,21 +512,21 @@ zink_create_gfx_program(struct zink_context *ctx,
prog->stages_present |= BITFIELD_BIT(i);
}
}
if (stages[PIPE_SHADER_TESS_EVAL] && !stages[PIPE_SHADER_TESS_CTRL]) {
prog->shaders[PIPE_SHADER_TESS_EVAL]->generated =
prog->shaders[PIPE_SHADER_TESS_CTRL] =
zink_shader_tcs_create(screen, stages[PIPE_SHADER_VERTEX], vertices_per_patch);
prog->stages_present |= BITFIELD_BIT(PIPE_SHADER_TESS_CTRL);
if (stages[MESA_SHADER_TESS_EVAL] && !stages[MESA_SHADER_TESS_CTRL]) {
prog->shaders[MESA_SHADER_TESS_EVAL]->generated =
prog->shaders[MESA_SHADER_TESS_CTRL] =
zink_shader_tcs_create(screen, stages[MESA_SHADER_VERTEX], vertices_per_patch);
prog->stages_present |= BITFIELD_BIT(MESA_SHADER_TESS_CTRL);
}
assign_io(prog, prog->shaders);
if (stages[PIPE_SHADER_GEOMETRY])
prog->last_vertex_stage = stages[PIPE_SHADER_GEOMETRY];
else if (stages[PIPE_SHADER_TESS_EVAL])
prog->last_vertex_stage = stages[PIPE_SHADER_TESS_EVAL];
if (stages[MESA_SHADER_GEOMETRY])
prog->last_vertex_stage = stages[MESA_SHADER_GEOMETRY];
else if (stages[MESA_SHADER_TESS_EVAL])
prog->last_vertex_stage = stages[MESA_SHADER_TESS_EVAL];
else
prog->last_vertex_stage = stages[PIPE_SHADER_VERTEX];
prog->last_vertex_stage = stages[MESA_SHADER_VERTEX];
for (int i = 0; i < ARRAY_SIZE(prog->pipelines); ++i) {
_mesa_hash_table_init(&prog->pipelines[i], prog, NULL, equals_gfx_pipeline_state);
@ -647,18 +645,18 @@ fail:
}
uint32_t
zink_program_get_descriptor_usage(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type)
zink_program_get_descriptor_usage(struct zink_context *ctx, gl_shader_stage stage, enum zink_descriptor_type type)
{
struct zink_shader *zs = NULL;
switch (stage) {
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
case PIPE_SHADER_GEOMETRY:
case PIPE_SHADER_FRAGMENT:
case MESA_SHADER_VERTEX:
case MESA_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_EVAL:
case MESA_SHADER_GEOMETRY:
case MESA_SHADER_FRAGMENT:
zs = ctx->gfx_stages[stage];
break;
case PIPE_SHADER_COMPUTE: {
case MESA_SHADER_COMPUTE: {
zs = ctx->compute_stage;
break;
}
@ -683,18 +681,18 @@ zink_program_get_descriptor_usage(struct zink_context *ctx, enum pipe_shader_typ
}
bool
zink_program_descriptor_is_buffer(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned i)
zink_program_descriptor_is_buffer(struct zink_context *ctx, gl_shader_stage stage, enum zink_descriptor_type type, unsigned i)
{
struct zink_shader *zs = NULL;
switch (stage) {
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
case PIPE_SHADER_GEOMETRY:
case PIPE_SHADER_FRAGMENT:
case MESA_SHADER_VERTEX:
case MESA_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_EVAL:
case MESA_SHADER_GEOMETRY:
case MESA_SHADER_FRAGMENT:
zs = ctx->gfx_stages[stage];
break;
case PIPE_SHADER_COMPUTE: {
case MESA_SHADER_COMPUTE: {
zs = ctx->compute_stage;
break;
}
@ -779,8 +777,8 @@ zink_destroy_gfx_program(struct zink_context *ctx,
if (screen->info.have_EXT_extended_dynamic_state) {
/* only need first 3/4 for point/line/tri/patch */
if ((prog->stages_present &
(BITFIELD_BIT(PIPE_SHADER_TESS_EVAL) | BITFIELD_BIT(PIPE_SHADER_GEOMETRY))) ==
BITFIELD_BIT(PIPE_SHADER_TESS_EVAL))
(BITFIELD_BIT(MESA_SHADER_TESS_EVAL) | BITFIELD_BIT(MESA_SHADER_GEOMETRY))) ==
BITFIELD_BIT(MESA_SHADER_TESS_EVAL))
max_idx = 4;
else
max_idx = 3;
@ -1091,7 +1089,7 @@ zink_get_compute_pipeline(struct zink_screen *screen,
}
static inline void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
bind_stage(struct zink_context *ctx, gl_shader_stage stage,
struct zink_shader *shader)
{
if (shader && shader->nir->info.num_inlinable_uniforms)
@ -1099,7 +1097,7 @@ bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
else
ctx->shader_has_inlinable_uniforms_mask &= ~(1 << stage);
if (stage == PIPE_SHADER_COMPUTE) {
if (stage == MESA_SHADER_COMPUTE) {
if (ctx->compute_stage) {
ctx->compute_pipeline_state.final_hash ^= ctx->compute_pipeline_state.module_hash;
ctx->compute_pipeline_state.module = VK_NULL_HANDLE;
@ -1121,7 +1119,7 @@ bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
ctx->compute_pipeline_state.module = ctx->curr_compute->curr->shader;
ctx->compute_pipeline_state.final_hash ^= ctx->compute_pipeline_state.module_hash;
if (ctx->compute_pipeline_state.key.base.nonseamless_cube_mask)
ctx->dirty_shader_stages |= BITFIELD_BIT(PIPE_SHADER_COMPUTE);
ctx->dirty_shader_stages |= BITFIELD_BIT(MESA_SHADER_COMPUTE);
} else if (!shader)
ctx->curr_compute = NULL;
ctx->compute_stage = shader;
@ -1130,7 +1128,7 @@ bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
if (ctx->gfx_stages[stage])
ctx->gfx_hash ^= ctx->gfx_stages[stage]->hash;
ctx->gfx_stages[stage] = shader;
ctx->gfx_dirty = ctx->gfx_stages[PIPE_SHADER_FRAGMENT] && ctx->gfx_stages[PIPE_SHADER_VERTEX];
ctx->gfx_dirty = ctx->gfx_stages[MESA_SHADER_FRAGMENT] && ctx->gfx_stages[MESA_SHADER_VERTEX];
ctx->gfx_pipeline_state.modules_changed = true;
if (shader) {
ctx->shader_stages |= BITFIELD_BIT(stage);
@ -1148,21 +1146,21 @@ bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
static void
bind_last_vertex_stage(struct zink_context *ctx)
{
enum pipe_shader_type old = ctx->last_vertex_stage ? pipe_shader_type_from_mesa(ctx->last_vertex_stage->nir->info.stage) : MESA_SHADER_STAGES;
if (ctx->gfx_stages[PIPE_SHADER_GEOMETRY])
ctx->last_vertex_stage = ctx->gfx_stages[PIPE_SHADER_GEOMETRY];
else if (ctx->gfx_stages[PIPE_SHADER_TESS_EVAL])
ctx->last_vertex_stage = ctx->gfx_stages[PIPE_SHADER_TESS_EVAL];
gl_shader_stage old = ctx->last_vertex_stage ? ctx->last_vertex_stage->nir->info.stage : MESA_SHADER_STAGES;
if (ctx->gfx_stages[MESA_SHADER_GEOMETRY])
ctx->last_vertex_stage = ctx->gfx_stages[MESA_SHADER_GEOMETRY];
else if (ctx->gfx_stages[MESA_SHADER_TESS_EVAL])
ctx->last_vertex_stage = ctx->gfx_stages[MESA_SHADER_TESS_EVAL];
else
ctx->last_vertex_stage = ctx->gfx_stages[PIPE_SHADER_VERTEX];
enum pipe_shader_type current = ctx->last_vertex_stage ? pipe_shader_type_from_mesa(ctx->last_vertex_stage->nir->info.stage) : PIPE_SHADER_VERTEX;
ctx->last_vertex_stage = ctx->gfx_stages[MESA_SHADER_VERTEX];
gl_shader_stage current = ctx->last_vertex_stage ? ctx->last_vertex_stage->nir->info.stage : MESA_SHADER_VERTEX;
if (old != current) {
if (old != MESA_SHADER_STAGES) {
memset(&ctx->gfx_pipeline_state.shader_keys.key[old].key.vs_base, 0, sizeof(struct zink_vs_key_base));
ctx->dirty_shader_stages |= BITFIELD_BIT(old);
} else {
/* always unset vertex shader values when changing to a non-vs last stage */
memset(&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_VERTEX].key.vs_base, 0, sizeof(struct zink_vs_key_base));
memset(&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_VERTEX].key.vs_base, 0, sizeof(struct zink_vs_key_base));
}
unsigned num_viewports = ctx->vp_state.num_viewports;
@ -1191,9 +1189,9 @@ zink_bind_vs_state(struct pipe_context *pctx,
void *cso)
{
struct zink_context *ctx = zink_context(pctx);
if (!cso && !ctx->gfx_stages[PIPE_SHADER_VERTEX])
if (!cso && !ctx->gfx_stages[MESA_SHADER_VERTEX])
return;
bind_stage(ctx, PIPE_SHADER_VERTEX, cso);
bind_stage(ctx, MESA_SHADER_VERTEX, cso);
bind_last_vertex_stage(ctx);
if (cso) {
struct zink_shader *zs = cso;
@ -1212,9 +1210,9 @@ zink_bind_vs_state(struct pipe_context *pctx,
void
zink_update_fs_key_samples(struct zink_context *ctx)
{
if (!ctx->gfx_stages[PIPE_SHADER_FRAGMENT])
if (!ctx->gfx_stages[MESA_SHADER_FRAGMENT])
return;
nir_shader *nir = ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir;
nir_shader *nir = ctx->gfx_stages[MESA_SHADER_FRAGMENT]->nir;
if (nir->info.outputs_written & (1 << FRAG_RESULT_SAMPLE_MASK)) {
bool samples = zink_get_fs_key(ctx)->samples;
if (samples != (ctx->fb_state.samples > 1))
@ -1227,14 +1225,14 @@ zink_bind_fs_state(struct pipe_context *pctx,
void *cso)
{
struct zink_context *ctx = zink_context(pctx);
if (!cso && !ctx->gfx_stages[PIPE_SHADER_FRAGMENT])
if (!cso && !ctx->gfx_stages[MESA_SHADER_FRAGMENT])
return;
bind_stage(ctx, PIPE_SHADER_FRAGMENT, cso);
bind_stage(ctx, MESA_SHADER_FRAGMENT, cso);
ctx->fbfetch_outputs = 0;
if (cso) {
nir_shader *nir = ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir;
nir_shader *nir = ctx->gfx_stages[MESA_SHADER_FRAGMENT]->nir;
if (nir->info.fs.uses_fbfetch_output) {
nir_foreach_shader_out_variable(var, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir) {
nir_foreach_shader_out_variable(var, ctx->gfx_stages[MESA_SHADER_FRAGMENT]->nir) {
if (var->data.fb_fetch_output)
ctx->fbfetch_outputs |= BITFIELD_BIT(var->data.location - FRAG_RESULT_DATA0);
}
@ -1249,10 +1247,10 @@ zink_bind_gs_state(struct pipe_context *pctx,
void *cso)
{
struct zink_context *ctx = zink_context(pctx);
if (!cso && !ctx->gfx_stages[PIPE_SHADER_GEOMETRY])
if (!cso && !ctx->gfx_stages[MESA_SHADER_GEOMETRY])
return;
bool had_points = ctx->gfx_stages[PIPE_SHADER_GEOMETRY] ? ctx->gfx_stages[PIPE_SHADER_GEOMETRY]->nir->info.gs.output_primitive == SHADER_PRIM_POINTS : false;
bind_stage(ctx, PIPE_SHADER_GEOMETRY, cso);
bool had_points = ctx->gfx_stages[MESA_SHADER_GEOMETRY] ? ctx->gfx_stages[MESA_SHADER_GEOMETRY]->nir->info.gs.output_primitive == SHADER_PRIM_POINTS : false;
bind_stage(ctx, MESA_SHADER_GEOMETRY, cso);
bind_last_vertex_stage(ctx);
if (cso) {
if (!had_points && ctx->last_vertex_stage->nir->info.gs.output_primitive == SHADER_PRIM_POINTS)
@ -1267,7 +1265,7 @@ static void
zink_bind_tcs_state(struct pipe_context *pctx,
void *cso)
{
bind_stage(zink_context(pctx), PIPE_SHADER_TESS_CTRL, cso);
bind_stage(zink_context(pctx), MESA_SHADER_TESS_CTRL, cso);
}
static void
@ -1275,16 +1273,16 @@ zink_bind_tes_state(struct pipe_context *pctx,
void *cso)
{
struct zink_context *ctx = zink_context(pctx);
if (!cso && !ctx->gfx_stages[PIPE_SHADER_TESS_EVAL])
if (!cso && !ctx->gfx_stages[MESA_SHADER_TESS_EVAL])
return;
if (!!ctx->gfx_stages[PIPE_SHADER_TESS_EVAL] != !!cso) {
if (!!ctx->gfx_stages[MESA_SHADER_TESS_EVAL] != !!cso) {
if (!cso) {
/* if unsetting a TESS that uses a generated TCS, ensure the TCS is unset */
if (ctx->gfx_stages[PIPE_SHADER_TESS_EVAL]->generated)
ctx->gfx_stages[PIPE_SHADER_TESS_CTRL] = NULL;
if (ctx->gfx_stages[MESA_SHADER_TESS_EVAL]->generated)
ctx->gfx_stages[MESA_SHADER_TESS_CTRL] = NULL;
}
}
bind_stage(ctx, PIPE_SHADER_TESS_EVAL, cso);
bind_stage(ctx, MESA_SHADER_TESS_EVAL, cso);
bind_last_vertex_stage(ctx);
}
@ -1305,7 +1303,7 @@ static void
zink_bind_cs_state(struct pipe_context *pctx,
void *cso)
{
bind_stage(zink_context(pctx), PIPE_SHADER_COMPUTE, cso);
bind_stage(zink_context(pctx), MESA_SHADER_COMPUTE, cso);
}
void

View file

@ -105,7 +105,7 @@ unsigned
zink_program_num_bindings(const struct zink_program *pg, bool is_compute);
bool
zink_program_descriptor_is_buffer(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned i);
zink_program_descriptor_is_buffer(struct zink_context *ctx, gl_shader_stage stage, enum zink_descriptor_type type, unsigned i);
void
zink_update_gfx_program(struct zink_context *ctx, struct zink_gfx_program *prog);
@ -129,7 +129,7 @@ void
zink_program_init(struct zink_context *ctx);
uint32_t
zink_program_get_descriptor_usage(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type);
zink_program_get_descriptor_usage(struct zink_context *ctx, gl_shader_stage stage, enum zink_descriptor_type type);
void
debug_describe_zink_gfx_program(char* buf, const struct zink_gfx_program *ptr);
@ -215,23 +215,23 @@ zink_program_has_descriptors(const struct zink_program *pg)
static inline struct zink_fs_key *
zink_set_fs_key(struct zink_context *ctx)
{
ctx->dirty_shader_stages |= BITFIELD_BIT(PIPE_SHADER_FRAGMENT);
return (struct zink_fs_key *)&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_FRAGMENT];
ctx->dirty_shader_stages |= BITFIELD_BIT(MESA_SHADER_FRAGMENT);
return (struct zink_fs_key *)&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_FRAGMENT];
}
static inline const struct zink_fs_key *
zink_get_fs_key(struct zink_context *ctx)
{
return (const struct zink_fs_key *)&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_FRAGMENT];
return (const struct zink_fs_key *)&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_FRAGMENT];
}
static inline bool
zink_set_tcs_key_patches(struct zink_context *ctx, uint8_t patch_vertices)
{
struct zink_tcs_key *tcs = (struct zink_tcs_key*)&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_TESS_CTRL];
struct zink_tcs_key *tcs = (struct zink_tcs_key*)&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_TESS_CTRL];
if (tcs->patch_vertices == patch_vertices)
return false;
ctx->dirty_shader_stages |= BITFIELD_BIT(PIPE_SHADER_TESS_CTRL);
ctx->dirty_shader_stages |= BITFIELD_BIT(MESA_SHADER_TESS_CTRL);
tcs->patch_vertices = patch_vertices;
return true;
}
@ -239,7 +239,7 @@ zink_set_tcs_key_patches(struct zink_context *ctx, uint8_t patch_vertices)
static inline const struct zink_tcs_key *
zink_get_tcs_key(struct zink_context *ctx)
{
return (const struct zink_tcs_key *)&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_TESS_CTRL];
return (const struct zink_tcs_key *)&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_TESS_CTRL];
}
void
@ -248,14 +248,14 @@ zink_update_fs_key_samples(struct zink_context *ctx);
static inline struct zink_vs_key *
zink_set_vs_key(struct zink_context *ctx)
{
ctx->dirty_shader_stages |= BITFIELD_BIT(PIPE_SHADER_VERTEX);
return (struct zink_vs_key *)&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_VERTEX];
ctx->dirty_shader_stages |= BITFIELD_BIT(MESA_SHADER_VERTEX);
return (struct zink_vs_key *)&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_VERTEX];
}
static inline const struct zink_vs_key *
zink_get_vs_key(struct zink_context *ctx)
{
return (const struct zink_vs_key *)&ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_VERTEX];
return (const struct zink_vs_key *)&ctx->gfx_pipeline_state.shader_keys.key[MESA_SHADER_VERTEX];
}
static inline struct zink_vs_key_base *

View file

@ -1080,7 +1080,7 @@ zink_query_update_gs_states(struct zink_context *ctx, bool was_line_loop)
{
struct zink_query *query;
bool suspendall = false;
bool have_gs = !!ctx->gfx_stages[PIPE_SHADER_GEOMETRY];
bool have_gs = !!ctx->gfx_stages[MESA_SHADER_GEOMETRY];
bool have_xfb = !!ctx->num_so_targets;
LIST_FOR_EACH_ENTRY(query, &ctx->primitives_generated_queries, stats_list) {

View file

@ -338,8 +338,8 @@ zink_init_zs_attachment(struct zink_context *ctx, struct zink_rt_attrib *rt)
rt->clear_stencil = zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS) &&
!zink_fb_clear_first_needs_explicit(fb_clear) &&
(zink_fb_clear_element(fb_clear, 0)->zs.bits & PIPE_CLEAR_STENCIL);
const uint64_t outputs_written = ctx->gfx_stages[PIPE_SHADER_FRAGMENT] ?
ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir->info.outputs_written : 0;
const uint64_t outputs_written = ctx->gfx_stages[MESA_SHADER_FRAGMENT] ?
ctx->gfx_stages[MESA_SHADER_FRAGMENT]->nir->info.outputs_written : 0;
bool needs_write_z = (ctx->dsa_state && ctx->dsa_state->hw_state.depth_write) ||
outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH);
needs_write_z |= transient || rt->clear_color ||

View file

@ -875,7 +875,7 @@ zink_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
static int
zink_get_shader_param(struct pipe_screen *pscreen,
enum pipe_shader_type shader,
gl_shader_stage shader,
enum pipe_shader_cap param)
{
struct zink_screen *screen = zink_screen(pscreen);
@ -883,22 +883,22 @@ zink_get_shader_param(struct pipe_screen *pscreen,
switch (param) {
case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
switch (shader) {
case PIPE_SHADER_FRAGMENT:
case PIPE_SHADER_VERTEX:
case MESA_SHADER_FRAGMENT:
case MESA_SHADER_VERTEX:
return INT_MAX;
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
case MESA_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_EVAL:
if (screen->info.feats.features.tessellationShader &&
screen->info.have_KHR_maintenance2)
return INT_MAX;
break;
case PIPE_SHADER_GEOMETRY:
case MESA_SHADER_GEOMETRY:
if (screen->info.feats.features.geometryShader)
return INT_MAX;
break;
case PIPE_SHADER_COMPUTE:
case MESA_SHADER_COMPUTE:
return INT_MAX;
default:
break;
@ -913,19 +913,19 @@ zink_get_shader_param(struct pipe_screen *pscreen,
case PIPE_SHADER_CAP_MAX_INPUTS: {
uint32_t max = 0;
switch (shader) {
case PIPE_SHADER_VERTEX:
case MESA_SHADER_VERTEX:
max = MIN2(screen->info.props.limits.maxVertexInputAttributes, PIPE_MAX_ATTRIBS);
break;
case PIPE_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_CTRL:
max = screen->info.props.limits.maxTessellationControlPerVertexInputComponents / 4;
break;
case PIPE_SHADER_TESS_EVAL:
case MESA_SHADER_TESS_EVAL:
max = screen->info.props.limits.maxTessellationEvaluationInputComponents / 4;
break;
case PIPE_SHADER_GEOMETRY:
case MESA_SHADER_GEOMETRY:
max = screen->info.props.limits.maxGeometryInputComponents / 4;
break;
case PIPE_SHADER_FRAGMENT:
case MESA_SHADER_FRAGMENT:
/* intel drivers report fewer components, but it's a value that's compatible
* with what we need for GL, so we can still force a conformant value here
*/
@ -938,9 +938,9 @@ zink_get_shader_param(struct pipe_screen *pscreen,
return 0; /* unsupported stage */
}
switch (shader) {
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_EVAL:
case PIPE_SHADER_GEOMETRY:
case MESA_SHADER_VERTEX:
case MESA_SHADER_TESS_EVAL:
case MESA_SHADER_GEOMETRY:
/* last vertex stage must support streamout, and this is capped in glsl compiler */
return MIN2(max, MAX_VARYING);
default: break;
@ -951,19 +951,19 @@ zink_get_shader_param(struct pipe_screen *pscreen,
case PIPE_SHADER_CAP_MAX_OUTPUTS: {
uint32_t max = 0;
switch (shader) {
case PIPE_SHADER_VERTEX:
case MESA_SHADER_VERTEX:
max = screen->info.props.limits.maxVertexOutputComponents / 4;
break;
case PIPE_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_CTRL:
max = screen->info.props.limits.maxTessellationControlPerVertexOutputComponents / 4;
break;
case PIPE_SHADER_TESS_EVAL:
case MESA_SHADER_TESS_EVAL:
max = screen->info.props.limits.maxTessellationEvaluationOutputComponents / 4;
break;
case PIPE_SHADER_GEOMETRY:
case MESA_SHADER_GEOMETRY:
max = screen->info.props.limits.maxGeometryOutputComponents / 4;
break;
case PIPE_SHADER_FRAGMENT:
case MESA_SHADER_FRAGMENT:
max = screen->info.props.limits.maxColorAttachments;
break;
default:
@ -1036,15 +1036,15 @@ zink_get_shader_param(struct pipe_screen *pscreen,
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
switch (shader) {
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
case PIPE_SHADER_GEOMETRY:
case MESA_SHADER_VERTEX:
case MESA_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_EVAL:
case MESA_SHADER_GEOMETRY:
if (!screen->info.feats.features.vertexPipelineStoresAndAtomics)
return 0;
break;
case PIPE_SHADER_FRAGMENT:
case MESA_SHADER_FRAGMENT:
if (!screen->info.feats.features.fragmentStoresAndAtomics)
return 0;
break;