kk: Add MESA_KK_DISABLE_WORKAROUNDS to disable workarounds
MESA_KK_DISABLE_WORKAROUNDS provides a way to disable the workarounds we have had to apply to get Vulkan conformance, in the hope that the underlying Metal bugs get fixed in upcoming macOS releases.

Reviewed-by: Arcady Goldmints-Orlov <arcady@lunarg.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38426>
parent adf881cd3e
commit c49b3c670c
7 changed files with 55 additions and 22 deletions
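The environment variable takes a comma-separated list of workaround indices, or "all"; each index maps to one bit of a 64-bit mask that is threaded from the device down into the MSL backend. A minimal sketch of the gating test used throughout the diff below, with a local stand-in for Mesa's BITFIELD64_BIT macro (the real one lives in util/macros.h):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for Mesa's BITFIELD64_BIT(); defined here for self-containment. */
#define BITFIELD64_BIT(b) (UINT64_C(1) << (b))

/* True when the user disabled workaround `index`, in which case the
 * backend emits the straightforward MSL instead of the workaround.
 * Name is illustrative, not from the source. */
static bool
workaround_disabled(uint64_t disabled_workarounds, unsigned index)
{
   return (disabled_workarounds & BITFIELD64_BIT(index)) != 0;
}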
@@ -22,6 +22,7 @@ struct nir_to_msl_ctx {
    struct hash_table *types;
    nir_shader *shader;
    struct _mesa_string_buffer *text;
+   uint64_t disabled_workarounds;
    unsigned short indentlevel;
    struct io_slot_info inputs_info[NUM_TOTAL_VARYING_SLOTS];
    struct io_slot_info outputs_info[NUM_TOTAL_VARYING_SLOTS];
@@ -112,7 +112,11 @@ emit_local_vars(struct nir_to_msl_ctx *ctx, nir_shader *shader)
    }
    if (shader->scratch_size) {
       /* KK_WORKAROUND_1 */
-      P_IND(ctx, "uchar scratch[%d] = {0};\n", shader->scratch_size);
+      if (ctx->disabled_workarounds & BITFIELD64_BIT(1)) {
+         P_IND(ctx, "uchar scratch[%d];\n", shader->scratch_size);
+      } else {
+         P_IND(ctx, "uchar scratch[%d] = {0};\n", shader->scratch_size);
+      }
    }
    if (BITSET_TEST(shader->info.system_values_read,
                    SYSTEM_VALUE_HELPER_INVOCATION)) {
@@ -124,8 +128,7 @@ static bool
 is_register(nir_def *def)
 {
    return ((nir_def_is_intrinsic(def)) &&
-           (nir_def_as_intrinsic(def)->intrinsic ==
-            nir_intrinsic_load_reg));
+           (nir_def_as_intrinsic(def)->intrinsic == nir_intrinsic_load_reg));
 }

 static void
@@ -167,8 +170,7 @@ src_to_msl(struct nir_to_msl_ctx *ctx, nir_src *src)
    if (bitcast)
       P(ctx, "as_type<%s>(", bitcast);
    if (is_register(src->ssa)) {
-      nir_intrinsic_instr *instr =
-         nir_def_as_intrinsic(src->ssa);
+      nir_intrinsic_instr *instr = nir_def_as_intrinsic(src->ssa);
       if (src->ssa->bit_size != 1u) {
          P(ctx, "as_type<%s>(r%d)", msl_type_for_def(ctx->types, src->ssa),
            instr->src[0].ssa->index);
@@ -1357,7 +1359,11 @@ intrinsic_to_msl(struct nir_to_msl_ctx *ctx, nir_intrinsic_instr *instr)
       break;
    case nir_intrinsic_elect:
       /* KK_WORKAROUND_3 */
-      P(ctx, "simd_is_first() && (ulong)simd_ballot(true);\n");
+      if (ctx->disabled_workarounds & BITFIELD64_BIT(3)) {
+         P(ctx, "simd_is_first();\n");
+      } else {
+         P(ctx, "simd_is_first() && (ulong)simd_ballot(true);\n");
+      }
       break;
    case nir_intrinsic_read_first_invocation:
       P(ctx, "simd_broadcast_first(");
@@ -1782,10 +1788,14 @@ cf_node_to_metal(struct nir_to_msl_ctx *ctx, nir_cf_node *node)
       nir_loop *loop = nir_cf_node_as_loop(node);
       assert(!nir_loop_has_continue_construct(loop));
       /* KK_WORKAROUND_2 */
-      P_IND(ctx,
-            "for (uint64_t no_crash = 0u; no_crash < %" PRIu64
-            "; ++no_crash) {\n",
-            UINT64_MAX);
+      if (ctx->disabled_workarounds & BITFIELD64_BIT(2)) {
+         P_IND(ctx, "while (true) {\n");
+      } else {
+         P_IND(ctx,
+               "for (uint64_t no_crash = 0u; no_crash < %" PRIu64
+               "; ++no_crash) {\n",
+               UINT64_MAX);
+      }
       ctx->indentlevel++;
       foreach_list_typed(nir_cf_node, node, node, &loop->body) {
          cf_node_to_metal(ctx, node);
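KK_WORKAROUND_2 bounds every emitted loop with a counter; judging by the no_crash name, the point is to keep Metal's compiler from choking on a formally unbounded while (true). Since the bound is UINT64_MAX, any loop that exits via break behaves identically either way. A small illustrative C program (names hypothetical) demonstrating the equivalence of the two forms the backend can now emit:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   int a = 0, b = 0;

   /* What the backend emits with the workaround disabled
    * (MESA_KK_DISABLE_WORKAROUNDS=2): a plain unbounded loop. */
   while (1) {
      if (++a == 10)
         break;
   }

   /* The default, worked-around form: the no_crash guard can never
    * fail before the break fires, so iteration counts match. */
   for (uint64_t no_crash = 0u; no_crash < UINT64_MAX; ++no_crash) {
      if (++b == 10)
         break;
   }

   printf("%d %d\n", a, b); /* prints: 10 10 */
   return 0;
}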
@@ -1979,7 +1989,7 @@ predeclare_ssa_values(struct nir_to_msl_ctx *ctx, nir_function_impl *impl)
 }

 char *
-nir_to_msl(nir_shader *shader, void *mem_ctx)
+nir_to_msl(nir_shader *shader, void *mem_ctx, uint64_t disabled_workarounds)
 {
    /* Need to rename the entrypoint here since hardcoded shaders used by vk_meta
     * don't go through the preprocess step since we are the ones creating them.
@@ -1989,6 +1999,7 @@ nir_to_msl(nir_shader *shader, void *mem_ctx)
    struct nir_to_msl_ctx ctx = {
       .shader = shader,
       .text = _mesa_string_buffer_create(mem_ctx, 1024),
+      .disabled_workarounds = disabled_workarounds,
    };
    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
    msl_gather_info(&ctx);
@@ -11,7 +11,8 @@
 enum pipe_format;

 /* Assumes nir_shader_gather_info has been called beforehand. */
-char *nir_to_msl(nir_shader *shader, void *mem_ctx);
+char *nir_to_msl(nir_shader *shader, void *mem_ctx,
+                 uint64_t disabled_workarounds);

 /* Call this after all API-specific lowerings. It will bring the NIR out of SSA
  * at the end */
@@ -177,7 +177,7 @@ main(int argc, char **argv)
    optimize(shader);
    nir_print_shader(shader, stdout);

-   char *msl_text = nir_to_msl(shader, shader);
+   char *msl_text = nir_to_msl(shader, shader, 0u);

    fputs(msl_text, stdout);

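Passing 0u here leaves the mask empty, so the standalone compiler always applies every workaround; this matches the driver's default behavior when MESA_KK_DISABLE_WORKAROUNDS is unset.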
@@ -138,6 +138,31 @@ kk_sampler_heap_remove(struct kk_device *dev, struct kk_rc_sampler *rc)
    simple_mtx_unlock(&h->lock);
 }

+static void
+kk_parse_device_environment_options(struct kk_device *dev)
+{
+   dev->gpu_capture_enabled =
+      debug_get_bool_option("MESA_KK_GPU_CAPTURE", false);
+   if (dev->gpu_capture_enabled) {
+      const char *capture_directory =
+         debug_get_option("MESA_KK_GPU_CAPTURE_DIRECTORY", NULL);
+      mtl_start_gpu_capture(dev->mtl_handle, capture_directory);
+   }
+
+   const char *list = debug_get_option("MESA_KK_DISABLE_WORKAROUNDS", "");
+   const char *all_workarounds = "all";
+   const size_t all_len = strlen(all_workarounds);
+   for (unsigned n; n = strcspn(list, ","), *list; list += MAX2(1, n)) {
+      if (n == all_len && !strncmp(list, all_workarounds, n)) {
+         dev->disabled_workarounds = UINT64_MAX;
+         break;
+      }
+
+      int index = atoi(list);
+      dev->disabled_workarounds |= BITFIELD64_BIT(index);
+   }
+}
+
 VKAPI_ATTR VkResult VKAPI_CALL
 kk_CreateDevice(VkPhysicalDevice physicalDevice,
                 const VkDeviceCreateInfo *pCreateInfo,
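The parser above walks the comma-separated list with strcspn, accepting either decimal indices or the literal "all". One quirk worth noting: after a token is consumed, list advances onto the separator itself, so the next iteration sees an empty token, atoi yields 0, and bit 0 gets set; that is harmless since workarounds are numbered from 1. A self-contained sketch of the same loop, with local stand-ins for Mesa's MAX2 and BITFIELD64_BIT macros from util/macros.h:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Local stand-ins for Mesa's util macros, for self-containment. */
#define MAX2(a, b) ((a) > (b) ? (a) : (b))
#define BITFIELD64_BIT(b) (UINT64_C(1) << (b))

/* Mirrors the loop in kk_parse_device_environment_options() above. */
static uint64_t
parse_disabled_workarounds(const char *list)
{
   uint64_t mask = 0;
   const char *all_workarounds = "all";
   const size_t all_len = strlen(all_workarounds);

   for (unsigned n; n = strcspn(list, ","), *list; list += MAX2(1, n)) {
      if (n == all_len && !strncmp(list, all_workarounds, n))
         return UINT64_MAX;
      mask |= BITFIELD64_BIT(atoi(list));
   }
   return mask;
}

int
main(void)
{
   /* "1,3" -> bits 1 and 3, plus stray bit 0 from the empty token. */
   printf("%#" PRIx64 "\n", parse_disabled_workarounds("1,3")); /* 0xb */
   printf("%#" PRIx64 "\n", parse_disabled_workarounds("all")); /* 0xffffffffffffffff */
   printf("%#" PRIx64 "\n", parse_disabled_workarounds(""));    /* 0 */
   return 0;
}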
@@ -211,15 +236,9 @@ kk_CreateDevice(VkPhysicalDevice physicalDevice,
    simple_mtx_init(&dev->user_heap_cache.mutex, mtx_plain);
    dev->user_heap_cache.handles = UTIL_DYNARRAY_INIT;

-   *pDevice = kk_device_to_handle(dev);
+   kk_parse_device_environment_options(dev);

-   dev->gpu_capture_enabled =
-      debug_get_bool_option("MESA_KK_GPU_CAPTURE", false);
-   if (dev->gpu_capture_enabled) {
-      const char *capture_directory =
-         debug_get_option("MESA_KK_GPU_CAPTURE_DIRECTORY", NULL);
-      mtl_start_gpu_capture(dev->mtl_handle, capture_directory);
-   }
+   *pDevice = kk_device_to_handle(dev);

    return VK_SUCCESS;
 }
@@ -100,6 +100,7 @@ struct kk_device {

    struct vk_meta_device meta;

+   uint64_t disabled_workarounds;
    bool gpu_capture_enabled;
 };

@@ -652,7 +652,7 @@ kk_compile_shader(struct kk_device *dev, struct vk_shader_compile_info *info,
    }
    msl_optimize_nir(nir);
    modify_nir_info(nir);
-   shader->msl_code = nir_to_msl(nir, NULL);
+   shader->msl_code = nir_to_msl(nir, NULL, dev->disabled_workarounds);
    const char *entrypoint_name = nir_shader_get_entrypoint(nir)->function->name;

    /* We need to steal so it doesn't get destroyed with the nir. Needs to happen