/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "nir.h"
#include "nir_builder.h"

/*
 * Lowers vector pack/unpack ALU ops into their scalar "split" forms, e.g.:
 *
 *    pack_64_2x32(foo)   -> pack_64_2x32_split(foo.x, foo.y)
 *    unpack_64_2x32(foo) -> vec2(unpack_64_2x32_split_x(foo),
 *                                unpack_64_2x32_split_y(foo))
 *
 * The same pattern is applied to pack_64_4x16, unpack_64_4x16,
 * pack_32_2x16, unpack_32_2x16, pack_32_4x8 and unpack_32_4x8.
 */
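
/* pack_64_2x32: combine the two 32-bit channels of the source vec2 into one
 * 64-bit value via the split opcode, which takes the low and high dwords as
 * separate scalar sources.
 */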
static nir_def *
lower_pack_64_from_32(nir_builder *b, nir_def *src)
{
   return nir_pack_64_2x32_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1));
}
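
/* unpack_64_2x32: the split _x/_y opcodes return the low and high 32-bit
 * halves of the 64-bit source, reassembled here into a vec2.
 */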
static nir_def *
lower_unpack_64_to_32(nir_builder *b, nir_def *src)
{
   return nir_vec2(b, nir_unpack_64_2x32_split_x(b, src),
                   nir_unpack_64_2x32_split_y(b, src));
}
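
/* pack_32_2x16: same idea as the 64-bit case, combining two 16-bit channels
 * into a single 32-bit value.
 */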
static nir_def *
lower_pack_32_from_16(nir_builder *b, nir_def *src)
{
   return nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1));
}
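
/* unpack_32_2x16: split a 32-bit value into its low and high 16-bit halves. */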
static nir_def *
lower_unpack_32_to_16(nir_builder *b, nir_def *src)
{
   return nir_vec2(b, nir_unpack_32_2x16_split_x(b, src),
                   nir_unpack_32_2x16_split_y(b, src));
}
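
/* pack_64_4x16 is lowered in two steps: pack each pair of 16-bit channels
 * into a 32-bit half, then pack the two halves into the 64-bit result.
 */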
static nir_def *
lower_pack_64_from_16(nir_builder *b, nir_def *src)
{
   nir_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                        nir_channel(b, src, 1));

   nir_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src, 2),
                                        nir_channel(b, src, 3));

   return nir_pack_64_2x32_split(b, xy, zw);
}
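
/* unpack_64_4x16 is the reverse: split the 64-bit source into its two 32-bit
 * halves, then unpack each half into two 16-bit components.
 */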
static nir_def *
lower_unpack_64_to_16(nir_builder *b, nir_def *src)
{
   nir_def *xy = nir_unpack_64_2x32_split_x(b, src);
   nir_def *zw = nir_unpack_64_2x32_split_y(b, src);

   return nir_vec4(b, nir_unpack_32_2x16_split_x(b, xy),
                   nir_unpack_32_2x16_split_y(b, xy),
                   nir_unpack_32_2x16_split_x(b, zw),
                   nir_unpack_32_2x16_split_y(b, zw));
}
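
/* pack_32_4x8: prefer the single 4x8 split opcode when the backend reports
 * has_pack_32_4x8; otherwise widen the bytes to 32 bits and assemble the
 * result with shifts and ORs.
 */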
static nir_def *
lower_pack_32_from_8(nir_builder *b, nir_def *src)
{
   if (b->shader->options->has_pack_32_4x8) {
      return nir_pack_32_4x8_split(b,
                                   nir_channel(b, src, 0),
                                   nir_channel(b, src, 1),
                                   nir_channel(b, src, 2),
                                   nir_channel(b, src, 3));
   } else {
      nir_def *src32 = nir_u2u32(b, src);

      return nir_ior(b,
                     nir_ior(b,
                             nir_channel(b, src32, 0),
                             nir_ishl_imm(b, nir_channel(b, src32, 1), 8)),
                     nir_ior(b,
                             nir_ishl_imm(b, nir_channel(b, src32, 2), 16),
                             nir_ishl_imm(b, nir_channel(b, src32, 3), 24)));
   }
}
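
/* unpack_32_4x8: extract the four bytes of the 32-bit source, either with
 * plain shifts or with extract_u8, depending on whether extract_u8 itself is
 * being lowered (see the comment below).
 */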
static nir_def *
lower_unpack_32_to_8(nir_builder *b, nir_def *src)
{
   /* Some drivers call nir_lower_pack after the last time nir_opt_algebraic
    * is called. To prevent issues there, don't generate byte extraction
    * instructions when the lowering flag is set.
    */
   if (b->shader->options->lower_extract_byte) {
      return nir_vec4(b, nir_u2u8(b, src),
                      nir_u2u8(b, nir_ushr_imm(b, src, 8)),
                      nir_u2u8(b, nir_ushr_imm(b, src, 16)),
                      nir_u2u8(b, nir_ushr_imm(b, src, 24)));
   } else {
      return nir_vec4(b, nir_u2u8(b, nir_extract_u8_imm(b, src, 0)),
                      nir_u2u8(b, nir_extract_u8_imm(b, src, 1)),
                      nir_u2u8(b, nir_extract_u8_imm(b, src, 2)),
                      nir_u2u8(b, nir_extract_u8_imm(b, src, 3)));
   }
}
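
/* Per-instruction callback: map the ALU opcode to its nir_lower_packing_op,
 * honor the driver's skip_lower_packing_ops mask, and replace the matched
 * instruction with the expansion built by the corresponding helper above.
 */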
static bool
lower_pack_instr(nir_builder *b, nir_alu_instr *alu_instr, void *data)
{
   nir_lower_packing_op op;
   switch (alu_instr->op) {
   case nir_op_pack_64_2x32:
      op = nir_lower_packing_op_pack_64_2x32;
      break;
   case nir_op_unpack_64_2x32:
      op = nir_lower_packing_op_unpack_64_2x32;
      break;
   case nir_op_pack_64_4x16:
      op = nir_lower_packing_op_pack_64_4x16;
      break;
   case nir_op_unpack_64_4x16:
      op = nir_lower_packing_op_unpack_64_4x16;
      break;
   case nir_op_pack_32_2x16:
      op = nir_lower_packing_op_pack_32_2x16;
      break;
   case nir_op_unpack_32_2x16:
      op = nir_lower_packing_op_unpack_32_2x16;
      break;
   case nir_op_pack_32_4x8:
      op = nir_lower_packing_op_pack_32_4x8;
      break;
   case nir_op_unpack_32_4x8:
      op = nir_lower_packing_op_unpack_32_4x8;
      break;
   default:
      return false;
   }

   if (b->shader->options->skip_lower_packing_ops & BITFIELD_BIT(op))
      return false;

   b->cursor = nir_before_instr(&alu_instr->instr);

   typedef nir_def *(*lower_func_t)(nir_builder *b, nir_def *src);
   static const lower_func_t lower_funcs[nir_lower_packing_num_ops] = {
      [nir_lower_packing_op_pack_64_2x32] = lower_pack_64_from_32,
      [nir_lower_packing_op_unpack_64_2x32] = lower_unpack_64_to_32,
      [nir_lower_packing_op_pack_64_4x16] = lower_pack_64_from_16,
      [nir_lower_packing_op_unpack_64_4x16] = lower_unpack_64_to_16,
      [nir_lower_packing_op_pack_32_2x16] = lower_pack_32_from_16,
      [nir_lower_packing_op_unpack_32_2x16] = lower_unpack_32_to_16,
      [nir_lower_packing_op_pack_32_4x8] = lower_pack_32_from_8,
      [nir_lower_packing_op_unpack_32_4x8] = lower_unpack_32_to_8,
   };

   nir_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
   nir_def *dest = lower_funcs[op](b, src);
   nir_def_replace(&alu_instr->def, dest);

   return true;
}
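
/* Pass entry point: run lower_pack_instr over every ALU instruction in the
 * shader.  A driver would typically invoke it from its NIR lowering loop,
 * e.g. NIR_PASS(progress, nir, nir_lower_pack);
 */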
bool
nir_lower_pack(nir_shader *shader)
{
   return nir_shader_alu_pass(shader, lower_pack_instr,
                              nir_metadata_control_flow, NULL);
}