/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_state.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
 * complex, or highly reusable state can be created once, and bound and
 * rebound multiple times.  This is modeled with the pipe->create_*_state()
 * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
 * streamed out on the fly, via pipe->set_*_state() hooks.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  Second, the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), read the
 * context state and emit the commands into the actual batch.
 */
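
/* As a rough illustration of the "OR the DWords together" idea described
 * above (pseudocode with hypothetical packet and field names, not actual
 * driver code):
 *
 *    // create time: each CSO pre-packs only the fields it owns
 *    pack_3DSTATE_EXAMPLE(cso_a->example, .FieldOwnedByA = ...);
 *    pack_3DSTATE_EXAMPLE(cso_b->example, .FieldOwnedByB = ...);
 *
 *    // draw time: combine both halves and copy them into the batch
 *    for (int i = 0; i < EXAMPLE_LENGTH_IN_DWORDS; i++)
 *       out[i] = cso_a->example[i] | cso_b->example[i];
 */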

#include <stdio.h>
#include <errno.h>

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#ifdef DEBUG
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#endif
#else
#define VG(x)
#endif

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
#include "util/u_memory.h"
#include "util/u_trace_gallium.h"
#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/common/intel_aux_map.h"
#include "intel/common/intel_l3_config.h"
#include "intel/common/intel_sample_positions.h"
#include "intel/ds/intel_tracepoints.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_pipe.h"
#include "iris_resource.h"
#include "iris_utrace.h"

#include "iris_genx_macros.h"
#include "intel/common/intel_guardband.h"
#include "intel/common/intel_pixel_hash.h"

/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);

   /* pipe_blendfactor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);

   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
#undef PIPE_ASSERT
}

static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS]                   = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES]                    = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP]                = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP]               = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES]                = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP]           = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN]             = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS]                    = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP]               = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON]                  = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY]          = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY]      = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES]                  = _3DPRIM_PATCHLIST_1 - 1,
   };
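
   /* Note: the _3DPRIM_PATCHLIST_n topologies are numbered consecutively,
    * so adding verts_per_patch to (_3DPRIM_PATCHLIST_1 - 1) below selects
    * _3DPRIM_PATCHLIST_<verts_per_patch>; e.g. a patch with 3 control
    * points yields _3DPRIM_PATCHLIST_3.
    */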
   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}

static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}

static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
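   /* For example, PIPE_FUNC_LESS ("pass if ref < texel") maps to
    * PREFILTEROP_LEQUAL: the hardware then returns 0 when texel <= ref and
    * 1 when texel > ref, which is exactly "ref < texel".
    */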
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER]    = PREFILTEROP_ALWAYS,
      [PIPE_FUNC_LESS]     = PREFILTEROP_LEQUAL,
      [PIPE_FUNC_EQUAL]    = PREFILTEROP_NOTEQUAL,
      [PIPE_FUNC_LEQUAL]   = PREFILTEROP_LESS,
      [PIPE_FUNC_GREATER]  = PREFILTEROP_GEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROP_EQUAL,
      [PIPE_FUNC_GEQUAL]   = PREFILTEROP_GREATER,
      [PIPE_FUNC_ALWAYS]   = PREFILTEROP_NEVER,
   };
   return map[pipe_func];
}

static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE]           = CULLMODE_NONE,
      [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
      [PIPE_FACE_BACK]           = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}

static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}

static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}

static uint32_t
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}

/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}

/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->address + *out_offset, size);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}

/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   unsigned offset = 0;
   uint32_t *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}
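
/* An illustrative usage sketch (hypothetical variable names, not actual
 * driver code):
 *
 *    uint32_t offset = emit_state(batch, ice->state.dynamic_uploader,
 *                                 &stash_res, dwords, sizeof(dwords), 64);
 *
 * Because stream_state() folds in the BO's offset from the base address,
 * the returned value can be written directly into a *_STATE_POINTERS
 * style packet.
 */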

/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
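
/* Illustrative use of these macros (hypothetical field and flag names, not
 * actual driver code): in a bind hook where "old_cso" and "new_cso" are in
 * scope, one might write
 *
 *    if (cso_changed(line_width))
 *       ice->state.dirty |= IRIS_DIRTY_RASTER;
 *
 * i.e. the expression is true when there was no previous CSO, or when the
 * named field differs between the old and new one.
 */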

static void
flush_before_state_base_change(struct iris_batch *batch)
{
   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (flushes)",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH);
}

static void
flush_after_state_base_change(struct iris_batch *batch)
{
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    *
    * Wa_14013910100:
    *
    *  "DG2 128/256/512-A/B: S/W must program STATE_BASE_ADDRESS command twice
    *   or program pipe control with Instruction cache invalidate post
    *   STATE_BASE_ADDRESS command"
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (invalidates)",
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                              PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                              PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                              (GFX_VERx10 != 125 ? 0 :
                               PIPE_CONTROL_INSTRUCTION_INVALIDATE));
}

static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   mi_store(&b, mi_reg32(dst), mi_reg32(src));
}

static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   mi_store(&b, mi_reg64(dst), mi_reg64(src));
}

static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   mi_store(&b, mi_reg32(reg), mi_imm(val));
}

static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   mi_store(&b, mi_reg64(reg), mi_imm(val));
}

/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_batch_sync_region_start(batch);
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   struct mi_value src = mi_mem32(ro_bo(bo, offset));
   mi_store(&b, mi_reg32(reg), src);
   iris_batch_sync_region_end(batch);
}

/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_batch_sync_region_start(batch);
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   struct mi_value src = mi_mem64(ro_bo(bo, offset));
   mi_store(&b, mi_reg64(reg), src);
   iris_batch_sync_region_end(batch);
}

static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_batch_sync_region_start(batch);
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   struct mi_value dst = mi_mem32(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
   struct mi_value src = mi_reg32(reg);
   if (predicated)
      mi_store_if(&b, dst, src);
   else
      mi_store(&b, dst, src);
   iris_batch_sync_region_end(batch);
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_batch_sync_region_start(batch);
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   struct mi_value dst = mi_mem64(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
   struct mi_value src = mi_reg64(reg);
   if (predicated)
      mi_store_if(&b, dst, src);
   else
      mi_store(&b, dst, src);
   iris_batch_sync_region_end(batch);
}

static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_batch_sync_region_start(batch);
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   struct mi_value dst = mi_mem32(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
   struct mi_value src = mi_imm(imm);
   mi_store(&b, dst, src);
   iris_batch_sync_region_end(batch);
}

static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   iris_batch_sync_region_start(batch);
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);
   struct mi_value dst = mi_mem64(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
   struct mi_value src = mi_imm(imm);
   mi_store(&b, dst, src);
   iris_batch_sync_region_end(batch);
}

static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);
   iris_batch_sync_region_start(batch);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
                                             IRIS_DOMAIN_OTHER_WRITE);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }

   iris_batch_sync_region_end(batch);
}

static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GFX_VER >= 8 && GFX_VER < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gfx9
    * hardware too.
    */
   if (pipeline == GPGPU)
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *    "Project: DEVSNB+
    *
    *     Software must ensure all the write caches are flushed through a
    *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *     command to invalidate read only caches prior to programming
    *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (1/2)",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (2/2)",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
#if GFX_VER >= 9
      sel.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
      sel.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
#endif
      sel.PipelineSelection = pipeline;
   }
}

UNUSED static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
{
#if GFX_VER == 9
   /* Project: DevGLK
    *
    *    "This chicken bit works around a hardware issue with barrier
    *     logic encountered when switching between GPGPU and 3D pipelines.
    *     To workaround the issue, this mode bit should be set after a
    *     pipeline is selected."
    */
   iris_emit_reg(batch, GENX(SLICE_COMMON_ECO_CHICKEN1), reg) {
      reg.GLKBarrierMode = value;
      reg.GLKBarrierModeMask = 1;
   }
#endif
}

static void
init_state_base_address(struct iris_batch *batch)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;
   uint32_t mocs = isl_mocs(isl_dev, 0, false);
   flush_before_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateMOCS            = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS            = mocs;
      sba.IndirectObjectMOCS          = mocs;
      sba.InstructionMOCS             = mocs;
      sba.SurfaceStateMOCS            = mocs;

      sba.GeneralStateBaseAddressModifyEnable   = true;
      sba.DynamicStateBaseAddressModifyEnable   = true;
      sba.IndirectObjectBaseAddressModifyEnable = true;
      sba.InstructionBaseAddressModifyEnable    = true;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSizeModifyEnable    = true;
#if GFX_VER >= 9
      sba.BindlessSurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_BINDLESS_START);
      sba.BindlessSurfaceStateSize = (IRIS_BINDLESS_SIZE >> 12) - 1;
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.BindlessSurfaceStateMOCS    = mocs;
#endif
#if GFX_VER >= 11
      sba.BindlessSamplerStateMOCS    = mocs;
#endif
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBuffersizeModifyEnable     = true;

      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize   = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize    = 0xfffff;
      sba.DynamicStateBufferSize   = 0xfffff;
   }

   flush_after_state_base_change(batch);
}

static void
iris_emit_l3_config(struct iris_batch *batch,
                    const struct intel_l3_config *cfg)
{
   assert(cfg || GFX_VER >= 12);

#if GFX_VER >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   iris_emit_reg(batch, L3_ALLOCATION_REG, reg) {
#if GFX_VER < 11
      reg.SLMEnable = cfg->n[INTEL_L3P_SLM] > 0;
#endif
#if GFX_VER == 11
      /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in L3CNTLREG register. The default setting of the bit is not the
       * desirable behavior.
       */
      reg.ErrorDetectionBehaviorControl = true;
      reg.UseFullWays = true;
#endif
      if (GFX_VER < 12 || cfg) {
         reg.URBAllocation      = cfg->n[INTEL_L3P_URB];
         reg.ROAllocation       = cfg->n[INTEL_L3P_RO];
         reg.DCAllocation       = cfg->n[INTEL_L3P_DC];
         reg.AllAllocation      = cfg->n[INTEL_L3P_ALL];
      } else {
#if GFX_VER >= 12
         reg.L3FullWayAllocationEnable = true;
#endif
      }
   }
}

#if GFX_VER == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
   /* A fixed function pipe flush is required before modifying this field */
   iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
                                            : "disable preemption",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* enable object level preemption */
   iris_emit_reg(batch, GENX(CS_CHICKEN1), reg) {
      reg.ReplayMode = enable;
      reg.ReplayModeMask = true;
   }
}
#endif

static void
upload_pixel_hashing_tables(struct iris_batch *batch)
{
   UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
   UNUSED struct iris_context *ice = batch->ice;
   assert(&ice->batches[IRIS_BATCH_RENDER] == batch);

#if GFX_VER == 11
   /* Gfx11 hardware has two pixel pipes at most. */
   for (unsigned i = 2; i < ARRAY_SIZE(devinfo->ppipe_subslices); i++)
      assert(devinfo->ppipe_subslices[i] == 0);

   if (devinfo->ppipe_subslices[0] == devinfo->ppipe_subslices[1])
      return;

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   uint32_t hash_address;
   struct pipe_resource *tmp = NULL;
   uint32_t *map =
      stream_state(batch, ice->state.dynamic_uploader, &tmp,
                   size, 64, &hash_address);
   pipe_resource_reference(&tmp, NULL);

   const bool flip = devinfo->ppipe_subslices[0] < devinfo->ppipe_subslices[1];
   struct GENX(SLICE_HASH_TABLE) table;
   intel_compute_pixel_hash_table_3way(16, 16, 3, 3, flip, table.Entry[0]);

   GENX(SLICE_HASH_TABLE_pack)(NULL, map, &table);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = hash_address;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }

#elif GFX_VERx10 == 120
   /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
    * present with n active dual subslices.
    */
   unsigned ppipes_of[3] = {};

   for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
      for (unsigned p = 0; p < 3; p++)
         ppipes_of[n] += (devinfo->ppipe_subslices[p] == n);
   }

   /* Gfx12 has three pixel pipes. */
   for (unsigned p = 3; p < ARRAY_SIZE(devinfo->ppipe_subslices); p++)
      assert(devinfo->ppipe_subslices[p] == 0);

   if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
      /* All three pixel pipes have the maximum number of active dual
       * subslices, or there is only one active pixel pipe: Nothing to do.
       */
      return;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
      p.SliceHashControl[0] = TABLE_0;

      if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         intel_compute_pixel_hash_table_3way(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         intel_compute_pixel_hash_table_3way(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);

      if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
         intel_compute_pixel_hash_table_3way(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         intel_compute_pixel_hash_table_3way(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         intel_compute_pixel_hash_table_3way(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
      else
         unreachable("Illegal fusing.");
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), p) {
      p.SubsliceHashingTableEnable = true;
      p.SubsliceHashingTableEnableMask = true;
   }

#elif GFX_VERx10 == 125
   struct pipe_screen *pscreen = &batch->screen->base;
   const unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   const struct pipe_resource tmpl = {
      .target = PIPE_BUFFER,
      .format = PIPE_FORMAT_R8_UNORM,
      .bind = PIPE_BIND_CUSTOM,
      .usage = PIPE_USAGE_IMMUTABLE,
      .flags = IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE,
      .width0 = size,
      .height0 = 1,
      .depth0 = 1,
      .array_size = 1
   };

   pipe_resource_reference(&ice->state.pixel_hashing_tables, NULL);
   ice->state.pixel_hashing_tables = pscreen->resource_create(pscreen, &tmpl);

   struct iris_resource *res = (struct iris_resource *)ice->state.pixel_hashing_tables;
   struct pipe_transfer *transfer = NULL;
   uint32_t *map = pipe_buffer_map_range(&ice->ctx, ice->state.pixel_hashing_tables,
                                         0, size, PIPE_MAP_WRITE,
                                         &transfer);

   uint32_t ppipe_mask = 0;
   for (unsigned p = 0; p < ARRAY_SIZE(devinfo->ppipe_subslices); p++) {
      if (devinfo->ppipe_subslices[p])
         ppipe_mask |= (1u << p);
   }
   assert(ppipe_mask);

   struct GENX(SLICE_HASH_TABLE) table;

   /* Note that the hardware expects an array with 7 tables, each
    * table is intended to specify the pixel pipe hashing behavior for
    * every possible slice count between 2 and 8, however that doesn't
    * actually work, among other reasons due to hardware bugs that
    * will cause the GPU to erroneously access the table at the wrong
    * index in some cases, so in practice all 7 tables need to be
    * initialized to the same value.
    */
   for (unsigned i = 0; i < 7; i++)
      intel_compute_pixel_hash_table_nway(16, 16, ppipe_mask, table.Entry[i][0]);

   GENX(SLICE_HASH_TABLE_pack)(NULL, map, &table);

   pipe_buffer_unmap(&ice->ctx, transfer);

   iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_NONE);
   iris_record_state_size(batch->state_sizes, res->bo->address + res->offset, size);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = iris_bo_offset_from_base_address(res->bo) +
                                       res->offset;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
      mode.SliceHashingTableEnableMask = true;
      mode.CrossSliceHashingMode = (util_bitcount(ppipe_mask) > 1 ?
                                    hashing32x32 : NormalMode);
      mode.CrossSliceHashingModeMask = -1;
   }
#endif
}

static void
iris_alloc_push_constants(struct iris_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   /* For now, we set a static partitioning of the push constant area,
    * assuming that all stages could be in use.
    *
    * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
    *       see if that improves performance by offering more space to
    *       the VS/FS when those aren't in use.  Also, try dynamically
    *       enabling/disabling it like i965 does.  This would mean more
    *       stalls and may not actually help; we don't know yet.
    */

   /* Divide as equally as possible with any remainder given to FRAGMENT. */
   const unsigned push_constant_kb = devinfo->max_constant_urb_size_kb;
   const unsigned stage_size = push_constant_kb / 5;
   const unsigned frag_size = push_constant_kb - 4 * stage_size;
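   /* For example, if push_constant_kb were 32, each of VS/HS/DS/GS would
    * get 32 / 5 = 6 KB and the fragment stage the remaining
    * 32 - 4 * 6 = 8 KB.  (The 32 KB figure is purely illustrative.)
    */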

   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = stage_size * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? frag_size : stage_size;
      }
   }

#if GFX_VERx10 == 125
   /* Wa_22011440098
    *
    * In 3D mode, after programming push constant alloc command immediately
    * program push constant command(ZERO length) without any commit between
    * them.
    */
   if (intel_device_info_is_dg2(devinfo)) {
      iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), c) {
         c.MOCS = iris_mocs(NULL, &batch->screen->isl_dev, 0);
      }
   }
#endif
}

#if GFX_VER >= 12
static void
init_aux_map_state(struct iris_batch *batch);
#endif

/**
 * Upload initial GPU state for any kind of context.
 *
 * These need to happen for both render and compute.
 */
static void
iris_init_common_context(struct iris_batch *batch)
{
#if GFX_VER == 11
   iris_emit_reg(batch, GENX(SAMPLER_MODE), reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }

   /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
   iris_emit_reg(batch, GENX(HALF_SLICE_CHICKEN7), reg) {
      reg.EnabledTexelOffsetPrecisionFix = 1;
      reg.EnabledTexelOffsetPrecisionFixMask = 1;
   }
#endif
}

/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but that we never actually change.
 */
static void
iris_init_render_context(struct iris_batch *batch)
{
   UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;

   iris_batch_sync_region_start(batch);

   emit_pipeline_select(batch, _3D);

   iris_emit_l3_config(batch, batch->screen->l3_config_3d);

   init_state_base_address(batch);

   iris_init_common_context(batch);

#if GFX_VER >= 9
   iris_emit_reg(batch, GENX(CS_DEBUG_MODE2), reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
#else
   iris_emit_reg(batch, GENX(INSTPM), reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
#endif

#if GFX_VER == 9
   iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.MSCRAWHazardAvoidanceBit = true;
      reg.MSCRAWHazardAvoidanceBitMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }

   if (devinfo->platform == INTEL_PLATFORM_GLK)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif

#if GFX_VER == 11
   iris_emit_reg(batch, GENX(TCCNTLREG), reg) {
      reg.L3DataPartialWriteMergingEnable = true;
      reg.ColorZPartialWriteMergingEnable = true;
      reg.URBPartialWriteMergingEnable = true;
      reg.TCDisable = true;
   }

   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (devinfo->disable_ccs_repack) {
      iris_emit_reg(batch, GENX(CACHE_MODE_0), reg) {
         reg.DisableRepackingforCompression = true;
         reg.DisableRepackingforCompressionMask = true;
      }
   }
#endif

   upload_pixel_hashing_tables(batch);

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      INTEL_SAMPLE_POS_1X(pat._1xSample);
      INTEL_SAMPLE_POS_2X(pat._2xSample);
      INTEL_SAMPLE_POS_4X(pat._4xSample);
      INTEL_SAMPLE_POS_8X(pat._8xSample);
#if GFX_VER >= 9
      INTEL_SAMPLE_POS_16X(pat._16xSample);
#endif
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   /* TODO: may need to set an offset for origin-UL framebuffers */
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   iris_alloc_push_constants(batch);

#if GFX_VER >= 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}

static void
|
2019-10-02 10:25:17 -04:00
|
|
|
iris_init_compute_context(struct iris_batch *batch)
|
2018-07-26 21:59:20 -07:00
|
|
|
{
|
2021-04-05 13:19:39 -07:00
|
|
|
UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
|
2018-10-19 02:00:13 -07:00
|
|
|
|
2020-04-23 17:58:48 -07:00
|
|
|
iris_batch_sync_region_start(batch);
|
|
|
|
|
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1607854226:
|
2020-01-13 17:50:06 +02:00
|
|
|
*
|
|
|
|
|
* Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
|
|
|
|
|
*/
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 12
|
2020-01-13 17:50:06 +02:00
|
|
|
emit_pipeline_select(batch, _3D);
|
|
|
|
|
#else
|
2018-10-19 02:00:13 -07:00
|
|
|
emit_pipeline_select(batch, GPGPU);
|
2020-01-13 17:50:06 +02:00
|
|
|
#endif
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2020-01-17 11:37:31 -06:00
|
|
|
iris_emit_l3_config(batch, batch->screen->l3_config_cs);
|
2018-11-14 23:09:10 -06:00
|
|
|
|
2018-10-19 02:11:11 -07:00
|
|
|
init_state_base_address(batch);
|
|
|
|
|
|
2020-08-18 13:56:22 -07:00
|
|
|
iris_init_common_context(batch);
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 12
|
2020-01-13 17:50:06 +02:00
|
|
|
emit_pipeline_select(batch, GPGPU);
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 9
|
2021-09-22 15:06:58 +03:00
|
|
|
if (devinfo->platform == INTEL_PLATFORM_GLK)
|
2018-10-19 02:00:13 -07:00
|
|
|
init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
|
2018-09-13 11:40:10 -07:00
|
|
|
#endif
|
2020-02-25 15:04:08 -08:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2020-02-25 15:04:08 -08:00
|
|
|
init_aux_map_state(batch);
|
|
|
|
|
#endif
|
|
|
|
|
|
2020-04-23 17:58:48 -07:00
|
|
|
iris_batch_sync_region_end(batch);
|
2018-07-26 21:59:20 -07:00
|
|
|
}
|
|
|
|
|
|
2018-07-01 22:13:07 -07:00
|
|
|
struct iris_vertex_buffer_state {
|
2018-12-04 16:38:14 -08:00
|
|
|
/** The VERTEX_BUFFER_STATE hardware structure. */
|
|
|
|
|
uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];
|
2018-07-30 23:49:34 -07:00
|
|
|
|
|
|
|
|
/** The resource to source vertex data from. */
|
2018-12-04 16:38:14 -08:00
|
|
|
struct pipe_resource *resource;
|
2019-09-23 20:37:39 -07:00
|
|
|
|
|
|
|
|
int offset;
|
2018-07-01 22:13:07 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
struct iris_depth_buffer_state {
|
2018-07-30 23:49:34 -07:00
|
|
|
/* Depth/HiZ/Stencil related hardware packets. */
|
2018-07-01 22:13:07 -07:00
|
|
|
uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
|
|
|
|
|
GENX(3DSTATE_STENCIL_BUFFER_length) +
|
|
|
|
|
GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
|
2021-06-15 10:38:38 -07:00
|
|
|
GENX(3DSTATE_CLEAR_PARAMS_length)];
|
2018-07-01 22:13:07 -07:00
|
|
|
};
|
|
|
|
|
|
2021-06-16 10:22:48 -07:00
|
|
|
#if GFX_VERx10 == 120
|
|
|
|
|
enum iris_depth_reg_mode {
|
|
|
|
|
IRIS_DEPTH_REG_MODE_HW_DEFAULT = 0,
|
|
|
|
|
IRIS_DEPTH_REG_MODE_D16,
|
|
|
|
|
IRIS_DEPTH_REG_MODE_UNKNOWN,
|
|
|
|
|
};
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-07-01 22:13:07 -07:00
|
|
|
/**
|
2018-07-30 23:49:34 -07:00
|
|
|
* Generation-specific context state (ice->state.genx->...).
|
|
|
|
|
*
|
|
|
|
|
* Most state can go in iris_context directly, but these encode hardware
|
|
|
|
|
* packets which vary by generation.
|
2018-07-01 22:13:07 -07:00
|
|
|
*/
|
|
|
|
|
struct iris_genx_state {
|
2018-12-04 16:38:14 -08:00
|
|
|
struct iris_vertex_buffer_state vertex_buffers[33];
|
2019-02-16 00:57:54 -08:00
|
|
|
uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
|
2018-12-04 16:38:14 -08:00
|
|
|
|
2018-07-01 22:13:07 -07:00
|
|
|
struct iris_depth_buffer_state depth_buffer;
|
2018-06-29 12:58:31 -07:00
|
|
|
|
|
|
|
|
uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
|
2019-04-22 11:27:37 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2019-09-25 00:31:07 -07:00
|
|
|
bool pma_fix_enabled;
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 9
|
2019-04-24 16:43:36 -07:00
|
|
|
/* Is object level preemption enabled? */
|
|
|
|
|
bool object_preemption;
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-06-16 10:22:48 -07:00
|
|
|
#if GFX_VERx10 == 120
|
|
|
|
|
enum iris_depth_reg_mode depth_reg_mode;
|
|
|
|
|
#endif
|
|
|
|
|
|
2019-04-22 11:27:37 -07:00
|
|
|
struct {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2019-04-22 11:27:37 -07:00
|
|
|
struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
|
|
|
|
|
#endif
|
|
|
|
|
} shaders[MESA_SHADER_STAGES];
|
2018-07-01 22:13:07 -07:00
|
|
|
};
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_blend_color() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This corresponds to our COLOR_CALC_STATE.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_blend_color(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_blend_color *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
|
2017-11-23 23:15:14 -08:00
|
|
|
memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Gallium CSO for blend state (see pipe_blend_state).
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
struct iris_blend_state {
|
2018-06-09 00:01:09 -07:00
|
|
|
/** Partial 3DSTATE_PS_BLEND */
|
2017-11-23 23:15:14 -08:00
|
|
|
uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
|
2018-06-09 00:01:09 -07:00
|
|
|
|
|
|
|
|
/** Partial BLEND_STATE */
|
2018-01-25 21:53:41 -08:00
|
|
|
uint32_t blend_state[GENX(BLEND_STATE_length) +
|
|
|
|
|
BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
|
2018-01-25 02:09:59 -08:00
|
|
|
|
|
|
|
|
bool alpha_to_coverage; /* for shader key */
|
2018-12-10 23:22:54 -08:00
|
|
|
|
|
|
|
|
/** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
|
|
|
|
|
uint8_t blend_enables;
|
2019-02-11 12:07:51 -08:00
|
|
|
|
|
|
|
|
/** Bitfield of whether color writes are enabled for RT[i] */
|
|
|
|
|
uint8_t color_write_enables;
|
2019-04-18 22:29:27 -07:00
|
|
|
|
|
|
|
|
/** Does RT[0] use dual color blending? */
|
|
|
|
|
bool dual_color_blending;
|
2017-11-23 23:15:14 -08:00
|
|
|
};
|
|
|
|
|
|
2018-12-04 00:18:41 -08:00
|
|
|
static enum pipe_blendfactor
|
|
|
|
|
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
|
|
|
|
|
{
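/* Descriptive note (added): with alpha-to-one enabled, the blender sees a
 * source alpha of 1.0, so factors derived from the dual-source SRC1 alpha
 * collapse to constants: SRC1_ALPHA becomes ONE, INV_SRC1_ALPHA becomes ZERO.
 */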
|
|
|
|
|
if (alpha_to_one) {
|
|
|
|
|
if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
|
|
|
|
|
return PIPE_BLENDFACTOR_ONE;
|
|
|
|
|
|
|
|
|
|
if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
|
|
|
|
|
return PIPE_BLENDFACTOR_ZERO;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return f;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->create_blend_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* Translates a pipe_blend_state into iris_blend_state.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void *
|
|
|
|
|
iris_create_blend_state(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_blend_state *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
|
2018-11-12 11:33:44 -08:00
|
|
|
uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
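/* Descriptive note (added): the BLEND_STATE header is followed in memory by
 * one BLEND_STATE_ENTRY per render target, so the entry pointer starts just
 * past the header dwords.
 */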
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-12-10 23:22:54 -08:00
|
|
|
cso->blend_enables = 0;
|
2019-02-11 12:07:51 -08:00
|
|
|
cso->color_write_enables = 0;
|
2018-12-10 23:22:54 -08:00
|
|
|
STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
|
|
|
|
|
|
2018-01-25 02:09:59 -08:00
|
|
|
cso->alpha_to_coverage = state->alpha_to_coverage;
|
|
|
|
|
|
2018-11-12 11:33:44 -08:00
|
|
|
bool indep_alpha_blend = false;
|
2018-01-25 21:53:41 -08:00
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
|
2018-11-08 00:15:50 -08:00
|
|
|
const struct pipe_rt_blend_state *rt =
|
|
|
|
|
&state->rt[state->independent_blend_enable ? i : 0];
|
2018-11-12 11:33:44 -08:00
|
|
|
|
2018-12-04 00:18:41 -08:00
|
|
|
enum pipe_blendfactor src_rgb =
|
|
|
|
|
fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
|
|
|
|
|
enum pipe_blendfactor src_alpha =
|
|
|
|
|
fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
|
|
|
|
|
enum pipe_blendfactor dst_rgb =
|
|
|
|
|
fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
|
|
|
|
|
enum pipe_blendfactor dst_alpha =
|
|
|
|
|
fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);
|
|
|
|
|
|
2018-11-12 11:33:44 -08:00
|
|
|
if (rt->rgb_func != rt->alpha_func ||
|
2018-12-04 00:18:41 -08:00
|
|
|
src_rgb != src_alpha || dst_rgb != dst_alpha)
|
2018-11-12 11:33:44 -08:00
|
|
|
indep_alpha_blend = true;
|
|
|
|
|
|
2018-12-10 23:22:54 -08:00
|
|
|
if (rt->blend_enable)
|
|
|
|
|
cso->blend_enables |= 1u << i;
|
|
|
|
|
|
2019-02-11 12:07:51 -08:00
|
|
|
if (rt->colormask)
|
|
|
|
|
cso->color_write_enables |= 1u << i;
|
|
|
|
|
|
2018-11-12 11:33:44 -08:00
|
|
|
iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
|
2017-11-23 23:15:14 -08:00
|
|
|
be.LogicOpEnable = state->logicop_enable;
|
|
|
|
|
be.LogicOpFunction = state->logicop_func;
|
|
|
|
|
|
|
|
|
|
be.PreBlendSourceOnlyClampEnable = false;
|
|
|
|
|
be.ColorClampRange = COLORCLAMP_RTFORMAT;
|
|
|
|
|
be.PreBlendColorClampEnable = true;
|
|
|
|
|
be.PostBlendColorClampEnable = true;
|
|
|
|
|
|
2018-11-08 00:15:50 -08:00
|
|
|
be.ColorBufferBlendEnable = rt->blend_enable;
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-11-08 00:15:50 -08:00
|
|
|
be.ColorBlendFunction = rt->rgb_func;
|
|
|
|
|
be.AlphaBlendFunction = rt->alpha_func;
|
2021-04-12 12:04:31 -07:00
|
|
|
|
|
|
|
|
/* The casts prevent warnings about implicit enum type conversions. */
|
|
|
|
|
be.SourceBlendFactor = (int) src_rgb;
|
|
|
|
|
be.SourceAlphaBlendFactor = (int) src_alpha;
|
|
|
|
|
be.DestinationBlendFactor = (int) dst_rgb;
|
|
|
|
|
be.DestinationAlphaBlendFactor = (int) dst_alpha;
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-11-08 00:15:50 -08:00
|
|
|
be.WriteDisableRed = !(rt->colormask & PIPE_MASK_R);
|
|
|
|
|
be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
|
|
|
|
|
be.WriteDisableBlue = !(rt->colormask & PIPE_MASK_B);
|
|
|
|
|
be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
2018-11-12 11:33:44 -08:00
|
|
|
blend_entry += GENX(BLEND_STATE_ENTRY_length);
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-11-12 11:33:44 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
|
2019-05-02 21:14:49 -07:00
|
|
|
/* pb.HasWriteableRT is filled in at draw time.
|
|
|
|
|
* pb.AlphaTestEnable is filled in at draw time.
|
|
|
|
|
*
|
|
|
|
|
* pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
|
|
|
|
|
* setting it when dual color blending is used without an appropriate shader.
|
|
|
|
|
*/
|
|
|
|
|
|
2018-11-12 11:33:44 -08:00
|
|
|
pb.AlphaToCoverageEnable = state->alpha_to_coverage;
|
|
|
|
|
pb.IndependentAlphaBlendEnable = indep_alpha_blend;
|
|
|
|
|
|
2021-04-12 12:04:31 -07:00
|
|
|
/* The casts prevent warnings about implicit enum type conversions. */
|
2018-12-04 00:18:41 -08:00
|
|
|
pb.SourceBlendFactor =
|
2021-04-12 12:04:31 -07:00
|
|
|
(int) fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
|
2018-12-04 00:18:41 -08:00
|
|
|
pb.SourceAlphaBlendFactor =
|
2021-04-12 12:04:31 -07:00
|
|
|
(int) fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
|
2018-12-04 00:18:41 -08:00
|
|
|
pb.DestinationBlendFactor =
|
2021-04-12 12:04:31 -07:00
|
|
|
(int) fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
|
2018-12-04 00:18:41 -08:00
|
|
|
pb.DestinationAlphaBlendFactor =
|
2021-04-12 12:04:31 -07:00
|
|
|
(int) fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
|
2018-11-12 11:33:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
|
|
|
|
|
bs.AlphaToCoverageEnable = state->alpha_to_coverage;
|
|
|
|
|
bs.IndependentAlphaBlendEnable = indep_alpha_blend;
|
|
|
|
|
bs.AlphaToOneEnable = state->alpha_to_one;
|
|
|
|
|
bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
|
|
|
|
|
bs.ColorDitherEnable = state->dither;
|
|
|
|
|
/* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-18 22:29:27 -07:00
|
|
|
cso->dual_color_blending = util_blend_state_is_dual(state, 0);
|
2018-11-12 11:33:44 -08:00
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
return cso;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->bind_blend_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* Bind a blending CSO and flag related dirty bits.
|
|
|
|
|
*/
|
2017-12-27 02:54:26 -08:00
|
|
|
static void
|
|
|
|
|
iris_bind_blend_state(struct pipe_context *ctx, void *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-12-10 23:22:54 -08:00
|
|
|
struct iris_blend_state *cso = state;
|
|
|
|
|
|
|
|
|
|
ice->state.cso_blend = cso;
|
|
|
|
|
|
2018-06-09 00:01:09 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];
|
2019-09-25 00:31:07 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 8)
|
2019-09-25 00:31:07 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
|
2017-12-27 02:54:26 -08:00
|
|
|
}
|
|
|
|
|
|
2019-02-11 12:07:51 -08:00
|
|
|
/**
|
|
|
|
|
* Return true if the FS writes to any color outputs which are not disabled
|
|
|
|
|
* via color masking.
|
|
|
|
|
*/
|
|
|
|
|
static bool
|
|
|
|
|
has_writeable_rt(const struct iris_blend_state *cso_blend,
|
|
|
|
|
const struct shader_info *fs_info)
|
|
|
|
|
{
|
|
|
|
|
if (!fs_info)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
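/* Descriptive note (added): outputs_written is indexed by FRAG_RESULT_*; the
 * shift above leaves one bit per color attachment.  A write to
 * FRAG_RESULT_COLOR (gl_FragColor) broadcasts to every render target, so the
 * check below counts all of them as written.
 */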
|
|
|
|
|
|
|
|
|
|
if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
|
|
|
|
|
rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
|
|
|
|
|
|
|
|
|
|
return cso_blend->color_write_enables & rt_outputs;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Gallium CSO for depth, stencil, and alpha testing state.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
struct iris_depth_stencil_alpha_state {
|
2018-07-30 23:49:34 -07:00
|
|
|
/** Partial 3DSTATE_WM_DEPTH_STENCIL. */
|
2017-11-23 23:15:14 -08:00
|
|
|
uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
|
2018-06-09 00:01:09 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-10-23 20:56:45 +01:00
|
|
|
uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
|
2020-12-04 08:19:57 -05:00
|
|
|
unsigned alpha_enabled:1;
|
|
|
|
|
unsigned alpha_func:3; /**< PIPE_FUNC_x */
|
|
|
|
|
float alpha_ref_value; /**< reference value */
|
2018-08-18 23:21:41 -07:00
|
|
|
|
|
|
|
|
/** Outbound to resolve and cache set tracking. */
|
|
|
|
|
bool depth_writes_enabled;
|
|
|
|
|
bool stencil_writes_enabled;
|
2019-09-25 00:31:07 -07:00
|
|
|
|
2021-03-29 15:46:12 -07:00
|
|
|
/** Outbound to Gfx8-9 PMA stall equations */
|
2019-09-25 00:31:07 -07:00
|
|
|
bool depth_test_enabled;
|
2017-11-23 23:15:14 -08:00
|
|
|
};
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->create_depth_stencil_alpha_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
|
|
|
|
|
* testing state since we need pieces of it in a variety of places.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void *
|
2017-12-27 02:54:26 -08:00
|
|
|
iris_create_zsa_state(struct pipe_context *ctx,
|
2017-11-23 23:15:14 -08:00
|
|
|
const struct pipe_depth_stencil_alpha_state *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_depth_stencil_alpha_state *cso =
|
|
|
|
|
malloc(sizeof(struct iris_depth_stencil_alpha_state));
|
|
|
|
|
|
|
|
|
|
bool two_sided_stencil = state->stencil[1].enabled;
|
|
|
|
|
|
2020-12-04 08:19:57 -05:00
|
|
|
cso->alpha_enabled = state->alpha_enabled;
|
|
|
|
|
cso->alpha_func = state->alpha_func;
|
|
|
|
|
cso->alpha_ref_value = state->alpha_ref_value;
|
2020-12-04 08:37:14 -05:00
|
|
|
cso->depth_writes_enabled = state->depth_writemask;
|
|
|
|
|
cso->depth_test_enabled = state->depth_enabled;
|
2018-08-18 23:21:41 -07:00
|
|
|
cso->stencil_writes_enabled =
|
|
|
|
|
state->stencil[0].writemask != 0 ||
|
2019-03-09 00:25:30 -08:00
|
|
|
(two_sided_stencil && state->stencil[1].writemask != 0);
|
2018-08-18 23:21:41 -07:00
|
|
|
|
2019-12-03 20:38:14 -05:00
|
|
|
/* gallium frontends need to optimize away EQUAL writes for us. */
|
2020-12-04 08:37:14 -05:00
|
|
|
assert(!(state->depth_func == PIPE_FUNC_EQUAL && state->depth_writemask));
|
2017-11-23 23:15:14 -08:00
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
|
|
|
|
|
wmds.StencilFailOp = state->stencil[0].fail_op;
|
|
|
|
|
wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
|
|
|
|
|
wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
|
|
|
|
|
wmds.StencilTestFunction =
|
|
|
|
|
translate_compare_func(state->stencil[0].func);
|
|
|
|
|
wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
|
|
|
|
|
wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
|
|
|
|
|
wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
|
|
|
|
|
wmds.BackfaceStencilTestFunction =
|
|
|
|
|
translate_compare_func(state->stencil[1].func);
|
2020-12-04 08:37:14 -05:00
|
|
|
wmds.DepthTestFunction = translate_compare_func(state->depth_func);
|
2017-11-23 23:15:14 -08:00
|
|
|
wmds.DoubleSidedStencilEnable = two_sided_stencil;
|
|
|
|
|
wmds.StencilTestEnable = state->stencil[0].enabled;
|
|
|
|
|
wmds.StencilBufferWriteEnable =
|
|
|
|
|
state->stencil[0].writemask != 0 ||
|
|
|
|
|
(two_sided_stencil && state->stencil[1].writemask != 0);
|
2020-12-04 08:37:14 -05:00
|
|
|
wmds.DepthTestEnable = state->depth_enabled;
|
|
|
|
|
wmds.DepthBufferWriteEnable = state->depth_writemask;
|
2017-11-23 23:15:14 -08:00
|
|
|
wmds.StencilTestMask = state->stencil[0].valuemask;
|
|
|
|
|
wmds.StencilWriteMask = state->stencil[0].writemask;
|
|
|
|
|
wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
|
|
|
|
|
wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
|
2018-01-09 11:25:29 -08:00
|
|
|
/* wmds.[Backface]StencilReferenceValue are merged later */
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2020-02-04 10:49:59 -08:00
|
|
|
wmds.StencilReferenceValueModifyDisable = true;
|
|
|
|
|
#endif
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-10-23 20:56:45 +01:00
|
|
|
iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
|
|
|
|
|
depth_bounds.DepthBoundsTestValueModifyDisable = false;
|
|
|
|
|
depth_bounds.DepthBoundsTestEnableModifyDisable = false;
|
2020-12-04 08:37:14 -05:00
|
|
|
depth_bounds.DepthBoundsTestEnable = state->depth_bounds_test;
|
|
|
|
|
depth_bounds.DepthBoundsTestMinValue = state->depth_bounds_min;
|
|
|
|
|
depth_bounds.DepthBoundsTestMaxValue = state->depth_bounds_max;
|
2019-10-23 20:56:45 +01:00
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
return cso;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->bind_depth_stencil_alpha_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* Bind a depth/stencil/alpha CSO and flag related dirty bits.
|
|
|
|
|
*/
|
2017-12-27 02:54:26 -08:00
|
|
|
static void
|
|
|
|
|
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-01-10 00:36:44 -08:00
|
|
|
struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
|
|
|
|
|
struct iris_depth_stencil_alpha_state *new_cso = state;
|
|
|
|
|
|
|
|
|
|
if (new_cso) {
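/* Descriptive note (added): cso_changed(field), a macro defined earlier in
 * this file, reports whether the field differs between the old and new CSO
 * (treating a missing old CSO as changed), so we only flag the dirty bits
 * that are actually needed.
 */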
|
2020-12-04 08:19:57 -05:00
|
|
|
if (cso_changed(alpha_ref_value))
|
2018-01-10 00:36:44 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
|
2018-06-09 00:01:09 -07:00
|
|
|
|
2020-12-04 08:19:57 -05:00
|
|
|
if (cso_changed(alpha_enabled))
|
2018-06-09 00:01:09 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
|
2018-07-21 20:47:08 -07:00
|
|
|
|
2020-12-04 08:19:57 -05:00
|
|
|
if (cso_changed(alpha_func))
|
2018-07-21 20:47:08 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
|
2018-08-18 23:21:41 -07:00
|
|
|
|
2021-03-22 21:15:16 -07:00
|
|
|
if (cso_changed(depth_writes_enabled) || cso_changed(stencil_writes_enabled))
|
2019-03-11 00:04:56 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
|
|
|
|
|
|
2018-08-18 23:21:41 -07:00
|
|
|
ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
|
|
|
|
|
ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
|
2019-10-23 20:56:45 +01:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-10-23 20:56:45 +01:00
|
|
|
if (cso_changed(depth_bounds))
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
|
|
|
|
|
#endif
|
2018-01-10 00:36:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ice->state.cso_zsa = new_cso;
|
2017-12-27 02:54:26 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |=
|
|
|
|
|
ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
|
2019-09-25 00:31:07 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 8)
|
2019-09-25 00:31:07 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2019-09-25 00:31:07 -07:00
|
|
|
static bool
|
|
|
|
|
want_pma_fix(struct iris_context *ice)
|
|
|
|
|
{
|
|
|
|
|
UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
|
2021-04-05 13:19:39 -07:00
|
|
|
UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
|
2019-09-25 00:31:07 -07:00
|
|
|
const struct brw_wm_prog_data *wm_prog_data = (void *)
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
|
|
|
|
|
const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
|
|
|
|
const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
|
|
|
|
|
const struct iris_blend_state *cso_blend = ice->state.cso_blend;
|
|
|
|
|
|
2021-03-29 15:46:12 -07:00
|
|
|
/* In very specific combinations of state, we can instruct Gfx8-9 hardware
|
2019-09-25 00:31:07 -07:00
|
|
|
* to avoid stalling at the pixel mask array. The state equations are
|
|
|
|
|
* documented in these places:
|
|
|
|
|
*
|
2021-03-29 15:46:12 -07:00
|
|
|
* - Gfx8 Depth PMA Fix: CACHE_MODE_1::NP_PMA_FIX_ENABLE
|
|
|
|
|
* - Gfx9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
|
2019-09-25 00:31:07 -07:00
|
|
|
*
|
|
|
|
|
* Both equations share some common elements:
|
|
|
|
|
*
|
|
|
|
|
* no_hiz_op =
|
|
|
|
|
* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
|
|
|
|
|
* 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
|
|
|
|
|
* 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
|
|
|
|
|
* 3DSTATE_WM_HZ_OP::StencilBufferClear) &&
|
|
|
|
|
*
|
|
|
|
|
* killpixels =
|
|
|
|
|
* 3DSTATE_WM::ForceKillPix != ForceOff &&
|
|
|
|
|
* (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
|
|
|
|
|
* 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
|
|
|
|
|
* 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
|
|
|
|
|
* 3DSTATE_PS_BLEND::AlphaTestEnable ||
|
|
|
|
|
* 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
|
|
|
|
|
*
|
|
|
|
|
* (Technically the stencil PMA treats ForceKillPix differently,
|
|
|
|
|
* but I think this is a documentation oversight, and we don't
|
|
|
|
|
* ever use it in this way, so it doesn't matter).
|
|
|
|
|
*
|
|
|
|
|
* common_pma_fix =
|
|
|
|
|
* 3DSTATE_WM::ForceThreadDispatch != 1 &&
|
|
|
|
|
* 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
|
|
|
|
|
* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
|
|
|
|
|
* 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
|
|
|
|
|
* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
|
|
|
|
|
* 3DSTATE_PS_EXTRA::PixelShaderValid &&
|
|
|
|
|
* no_hiz_op
|
|
|
|
|
*
|
|
|
|
|
* These are always true:
|
|
|
|
|
*
|
|
|
|
|
* 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
|
|
|
|
|
* 3DSTATE_PS_EXTRA::PixelShaderValid
|
|
|
|
|
*
|
|
|
|
|
* Also, we never use the normal drawing path for HiZ ops; these are true:
|
|
|
|
|
*
|
|
|
|
|
* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
|
|
|
|
|
* 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
|
|
|
|
|
* 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
|
|
|
|
|
* 3DSTATE_WM_HZ_OP::StencilBufferClear)
|
|
|
|
|
*
|
|
|
|
|
* This happens sometimes:
|
|
|
|
|
*
|
|
|
|
|
* 3DSTATE_WM::ForceThreadDispatch != 1
|
|
|
|
|
*
|
|
|
|
|
* However, we choose to ignore it as it either agrees with the signal
|
|
|
|
|
* (dispatch was already enabled, so nothing out of the ordinary), or
|
|
|
|
|
* there are no framebuffer attachments (so no depth or HiZ anyway,
|
|
|
|
|
* meaning the PMA signal will already be disabled).
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
if (!cso_fb->zsbuf)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
struct iris_resource *zres, *sres;
|
|
|
|
|
iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
|
|
|
|
|
|
|
|
|
|
/* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
|
|
|
|
|
* 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
|
|
|
|
|
*/
|
|
|
|
|
if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
|
|
|
|
|
if (wm_prog_data->early_fragment_tests)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* 3DSTATE_WM::ForceKillPix != ForceOff &&
|
|
|
|
|
* (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
|
|
|
|
|
* 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
|
|
|
|
|
* 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
|
|
|
|
|
* 3DSTATE_PS_BLEND::AlphaTestEnable ||
|
|
|
|
|
* 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
|
|
|
|
|
*/
|
|
|
|
|
bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
|
2020-12-04 08:19:57 -05:00
|
|
|
cso_blend->alpha_to_coverage || cso_zsa->alpha_enabled;
|
2019-09-25 00:31:07 -07:00
|
|
|
|
2021-03-29 15:46:12 -07:00
|
|
|
/* The Gfx8 depth PMA equation becomes:
|
2019-09-25 00:31:07 -07:00
|
|
|
*
|
|
|
|
|
* depth_writes =
|
|
|
|
|
* 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
|
|
|
|
|
* 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
|
|
|
|
|
*
|
|
|
|
|
* stencil_writes =
|
|
|
|
|
* 3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
|
|
|
|
|
* 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
|
|
|
|
|
* 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
|
|
|
|
|
*
|
|
|
|
|
* Z_PMA_OPT =
|
|
|
|
|
* common_pma_fix &&
|
|
|
|
|
* 3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
|
|
|
|
|
* ((killpixels && (depth_writes || stencil_writes)) ||
|
|
|
|
|
* 3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
if (!cso_zsa->depth_test_enabled)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
|
|
|
|
|
(killpixels && (cso_zsa->depth_writes_enabled ||
|
|
|
|
|
(sres && cso_zsa->stencil_writes_enabled)));
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
genX(update_pma_fix)(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
bool enable)
|
|
|
|
|
{
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2019-09-25 00:31:07 -07:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
|
|
|
|
|
|
|
|
|
if (genx->pma_fix_enabled == enable)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
genx->pma_fix_enabled = enable;
|
|
|
|
|
|
|
|
|
|
/* According to the Broadwell PIPE_CONTROL documentation, software should
|
|
|
|
|
* emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
|
|
|
|
|
* prior to the LRI.  If stencil buffer writes are enabled, then a Render
* Cache Flush is also necessary.
|
|
|
|
|
*
|
2021-03-29 15:46:12 -07:00
|
|
|
* The Gfx9 docs say to use a depth stall rather than a command streamer
|
2019-09-25 00:31:07 -07:00
|
|
|
* stall. However, the hardware seems to violently disagree. A full
|
|
|
|
|
* command streamer stall seems to be needed in both cases.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
|
|
|
|
|
PIPE_CONTROL_CS_STALL |
|
|
|
|
|
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
|
|
|
|
|
PIPE_CONTROL_RENDER_TARGET_FLUSH);
|
|
|
|
|
|
2021-03-11 20:50:49 -06:00
|
|
|
iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
|
2019-09-25 00:31:07 -07:00
|
|
|
reg.NPPMAFixEnable = enable;
|
|
|
|
|
reg.NPEarlyZFailsDisable = enable;
|
|
|
|
|
reg.NPPMAFixEnableMask = true;
|
|
|
|
|
reg.NPEarlyZFailsDisableMask = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
|
|
|
|
|
* Flush bits is often necessary. We do it regardless because it's easier.
|
|
|
|
|
* The render cache flush is also necessary if stencil writes are enabled.
|
|
|
|
|
*
|
2021-03-29 15:46:12 -07:00
|
|
|
* Again, the Gfx9 docs give a different set of flushes but the Broadwell
|
2019-09-25 00:31:07 -07:00
|
|
|
* flushes seem to work just as well.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
|
|
|
|
|
PIPE_CONTROL_DEPTH_STALL |
|
|
|
|
|
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
|
|
|
|
|
PIPE_CONTROL_RENDER_TARGET_FLUSH);
|
|
|
|
|
#endif
|
2017-12-27 02:54:26 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Gallium CSO for rasterizer state.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
struct iris_rasterizer_state {
|
|
|
|
|
uint32_t sf[GENX(3DSTATE_SF_length)];
|
|
|
|
|
uint32_t clip[GENX(3DSTATE_CLIP_length)];
|
|
|
|
|
uint32_t raster[GENX(3DSTATE_RASTER_length)];
|
|
|
|
|
uint32_t wm[GENX(3DSTATE_WM_length)];
|
2018-01-20 00:55:16 -08:00
|
|
|
uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-10-26 22:18:56 -07:00
|
|
|
uint8_t num_clip_plane_consts;
|
2018-07-14 01:29:33 -07:00
|
|
|
bool clip_halfz; /* for CC_VIEWPORT */
|
|
|
|
|
bool depth_clip_near; /* for CC_VIEWPORT */
|
|
|
|
|
bool depth_clip_far; /* for CC_VIEWPORT */
|
2017-11-23 23:15:14 -08:00
|
|
|
bool flatshade; /* for shader state */
|
2018-07-11 12:45:19 -07:00
|
|
|
bool flatshade_first; /* for stream output */
|
2018-01-25 02:09:59 -08:00
|
|
|
bool clamp_fragment_color; /* for shader state */
|
2017-11-23 23:15:14 -08:00
|
|
|
bool light_twoside; /* for shader state */
|
2018-12-03 02:59:08 -08:00
|
|
|
bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
|
2018-01-09 23:13:16 -08:00
|
|
|
bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
|
2018-06-09 00:01:09 -07:00
|
|
|
bool line_stipple_enable;
|
|
|
|
|
bool poly_stipple_enable;
|
2018-07-16 15:36:34 -07:00
|
|
|
bool multisample;
|
|
|
|
|
bool force_persample_interp;
|
2019-04-18 13:21:56 -04:00
|
|
|
bool conservative_rasterization;
|
2020-10-15 17:21:14 +03:00
|
|
|
bool fill_mode_point;
|
|
|
|
|
bool fill_mode_line;
|
2019-04-28 23:25:10 -07:00
|
|
|
bool fill_mode_point_or_line;
|
2017-11-23 23:15:14 -08:00
|
|
|
enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
|
2018-01-29 15:06:04 -08:00
|
|
|
uint16_t sprite_coord_enable;
|
2017-11-23 23:15:14 -08:00
|
|
|
};
|
|
|
|
|
|
2018-08-21 11:30:09 -07:00
|
|
|
static float
|
|
|
|
|
get_line_width(const struct pipe_rasterizer_state *state)
|
|
|
|
|
{
|
|
|
|
|
float line_width = state->line_width;
|
|
|
|
|
|
|
|
|
|
/* From the OpenGL 4.4 spec:
|
|
|
|
|
*
|
|
|
|
|
* "The actual width of non-antialiased lines is determined by rounding
|
|
|
|
|
* the supplied width to the nearest integer, then clamping it to the
|
|
|
|
|
* implementation-dependent maximum non-antialiased line width."
|
|
|
|
|
*/
|
|
|
|
|
if (!state->multisample && !state->line_smooth)
|
|
|
|
|
line_width = roundf(state->line_width);
|
|
|
|
|
|
|
|
|
|
if (!state->multisample && state->line_smooth && line_width < 1.5f) {
|
|
|
|
|
/* For 1 pixel line thickness or less, the general anti-aliasing
|
|
|
|
|
* algorithm gives up, and a garbage line is generated. Setting a
|
|
|
|
|
* Line Width of 0.0 specifies the rasterization of the "thinnest"
|
|
|
|
|
* (one-pixel-wide), non-antialiased lines.
|
|
|
|
|
*
|
|
|
|
|
* Lines rendered with zero Line Width are rasterized using the
|
|
|
|
|
* "Grid Intersection Quantization" rules as specified by the
|
|
|
|
|
* "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
|
|
|
|
|
*/
|
|
|
|
|
line_width = 0.0f;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return line_width;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->create_rasterizer_state() driver hook.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void *
|
|
|
|
|
iris_create_rasterizer_state(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_rasterizer_state *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_rasterizer_state *cso =
|
|
|
|
|
malloc(sizeof(struct iris_rasterizer_state));
|
|
|
|
|
|
2018-07-16 15:36:34 -07:00
|
|
|
cso->multisample = state->multisample;
|
|
|
|
|
cso->force_persample_interp = state->force_persample_interp;
|
2018-07-14 01:29:33 -07:00
|
|
|
cso->clip_halfz = state->clip_halfz;
|
|
|
|
|
cso->depth_clip_near = state->depth_clip_near;
|
|
|
|
|
cso->depth_clip_far = state->depth_clip_far;
|
2017-11-23 23:15:14 -08:00
|
|
|
cso->flatshade = state->flatshade;
|
2018-07-11 12:45:19 -07:00
|
|
|
cso->flatshade_first = state->flatshade_first;
|
2018-01-25 02:09:59 -08:00
|
|
|
cso->clamp_fragment_color = state->clamp_fragment_color;
|
2017-11-23 23:15:14 -08:00
|
|
|
cso->light_twoside = state->light_twoside;
|
|
|
|
|
cso->rasterizer_discard = state->rasterizer_discard;
|
2018-01-09 23:13:16 -08:00
|
|
|
cso->half_pixel_center = state->half_pixel_center;
|
2018-01-29 15:06:04 -08:00
|
|
|
cso->sprite_coord_mode = state->sprite_coord_mode;
|
|
|
|
|
cso->sprite_coord_enable = state->sprite_coord_enable;
|
2018-06-09 00:01:09 -07:00
|
|
|
cso->line_stipple_enable = state->line_stipple_enable;
|
|
|
|
|
cso->poly_stipple_enable = state->poly_stipple_enable;
|
2019-04-18 13:21:56 -04:00
|
|
|
cso->conservative_rasterization =
|
|
|
|
|
state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2020-10-15 17:21:14 +03:00
|
|
|
cso->fill_mode_point =
|
2019-04-28 23:25:10 -07:00
|
|
|
state->fill_front == PIPE_POLYGON_MODE_POINT ||
|
|
|
|
|
state->fill_back == PIPE_POLYGON_MODE_POINT;
|
2020-10-15 17:21:14 +03:00
|
|
|
cso->fill_mode_line =
|
|
|
|
|
state->fill_front == PIPE_POLYGON_MODE_LINE ||
|
|
|
|
|
state->fill_back == PIPE_POLYGON_MODE_LINE;
|
|
|
|
|
cso->fill_mode_point_or_line =
|
|
|
|
|
cso->fill_mode_point ||
|
|
|
|
|
cso->fill_mode_line;
|
2019-04-28 23:25:10 -07:00
|
|
|
|
2018-11-09 02:20:31 -08:00
|
|
|
if (state->clip_plane_enable != 0)
|
|
|
|
|
cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
|
|
|
|
|
else
|
|
|
|
|
cso->num_clip_plane_consts = 0;
|
|
|
|
|
|
2018-08-21 11:30:09 -07:00
|
|
|
float line_width = get_line_width(state);
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
|
|
|
|
|
sf.StatisticsEnable = true;
|
|
|
|
|
sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
|
|
|
|
|
sf.LineEndCapAntialiasingRegionWidth =
|
|
|
|
|
state->line_smooth ? _10pixels : _05pixels;
|
|
|
|
|
sf.LastPixelEnable = state->line_last_pixel;
|
2018-08-21 11:30:09 -07:00
|
|
|
sf.LineWidth = line_width;
|
2019-01-15 23:41:34 -08:00
|
|
|
sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
|
|
|
|
|
!state->point_quad_rasterization;
|
2017-11-23 23:15:14 -08:00
|
|
|
sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
|
2021-03-30 16:57:49 +03:00
|
|
|
sf.PointWidth = CLAMP(state->point_size, 0.125f, 255.875f);
|
2017-11-23 23:15:14 -08:00
|
|
|
|
|
|
|
|
if (state->flatshade_first) {
|
2018-06-26 10:02:46 -07:00
|
|
|
sf.TriangleFanProvokingVertexSelect = 1;
|
|
|
|
|
} else {
|
2017-11-23 23:15:14 -08:00
|
|
|
sf.TriangleStripListProvokingVertexSelect = 2;
|
|
|
|
|
sf.TriangleFanProvokingVertexSelect = 2;
|
|
|
|
|
sf.LineStripListProvokingVertexSelect = 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
|
|
|
|
|
rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
|
|
|
|
|
rr.CullMode = translate_cull_mode(state->cull_face);
|
|
|
|
|
rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
|
|
|
|
|
rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
|
|
|
|
|
rr.DXMultisampleRasterizationEnable = state->multisample;
|
|
|
|
|
rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
|
|
|
|
|
rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
|
|
|
|
|
rr.GlobalDepthOffsetEnablePoint = state->offset_point;
|
2018-06-26 10:09:08 -07:00
|
|
|
rr.GlobalDepthOffsetConstant = state->offset_units * 2;
|
2017-11-23 23:15:14 -08:00
|
|
|
rr.GlobalDepthOffsetScale = state->offset_scale;
|
|
|
|
|
rr.GlobalDepthOffsetClamp = state->offset_clamp;
|
2019-01-15 23:41:34 -08:00
|
|
|
rr.SmoothPointEnable = state->point_smooth;
|
2017-11-23 23:15:14 -08:00
|
|
|
rr.AntialiasingEnable = state->line_smooth;
|
|
|
|
|
rr.ScissorRectangleEnable = state->scissor;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9
|
2017-11-23 23:15:14 -08:00
|
|
|
rr.ViewportZNearClipTestEnable = state->depth_clip_near;
|
|
|
|
|
rr.ViewportZFarClipTestEnable = state->depth_clip_far;
|
2019-04-18 13:21:56 -04:00
|
|
|
rr.ConservativeRasterizationEnable =
|
|
|
|
|
cso->conservative_rasterization;
|
2018-11-07 14:23:27 +10:00
|
|
|
#else
|
|
|
|
|
rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
|
|
|
|
|
#endif
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
|
2018-01-23 01:23:54 -08:00
|
|
|
/* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
|
|
|
|
|
* the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
cl.EarlyCullEnable = true;
|
|
|
|
|
cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
|
|
|
|
|
cl.ForceUserClipDistanceClipTestEnableBitmask = true;
|
|
|
|
|
cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
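/* Descriptive note (added): clip_halfz selects the D3D-style [0,1]
 * clip-space Z range; otherwise we keep the OpenGL [-1,1] convention.
 */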
|
|
|
|
|
cl.GuardbandClipTestEnable = true;
|
|
|
|
|
cl.ClipEnable = true;
|
|
|
|
|
cl.MinimumPointWidth = 0.125;
|
|
|
|
|
cl.MaximumPointWidth = 255.875;
|
|
|
|
|
|
|
|
|
|
if (state->flatshade_first) {
|
2018-06-26 10:02:46 -07:00
|
|
|
cl.TriangleFanProvokingVertexSelect = 1;
|
|
|
|
|
} else {
|
2017-11-23 23:15:14 -08:00
|
|
|
cl.TriangleStripListProvokingVertexSelect = 2;
|
|
|
|
|
cl.TriangleFanProvokingVertexSelect = 2;
|
|
|
|
|
cl.LineStripListProvokingVertexSelect = 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
|
2018-01-23 01:23:54 -08:00
|
|
|
/* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
|
|
|
|
|
* filled in at draw time from the FS program.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
wm.LineAntialiasingRegionWidth = _10pixels;
|
|
|
|
|
wm.LineEndCapAntialiasingRegionWidth = _05pixels;
|
|
|
|
|
wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
|
|
|
|
|
wm.LineStippleEnable = state->line_stipple_enable;
|
|
|
|
|
wm.PolygonStippleEnable = state->poly_stipple_enable;
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-20 00:55:16 -08:00
|
|
|
/* Remap from 0..255 back to 1..256 */
|
|
|
|
|
const unsigned line_stipple_factor = state->line_stipple_factor + 1;
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
|
2019-09-07 23:43:05 -07:00
|
|
|
if (state->line_stipple_enable) {
|
|
|
|
|
line.LineStipplePattern = state->line_stipple_pattern;
|
|
|
|
|
line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
|
|
|
|
|
line.LineStippleRepeatCount = line_stipple_factor;
|
|
|
|
|
}
|
2018-01-20 00:55:16 -08:00
|
|
|
}
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
return cso;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->bind_rasterizer_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* Bind a rasterizer CSO and flag related dirty bits.
|
|
|
|
|
*/
|
2018-01-09 11:44:04 -08:00
|
|
|
static void
|
|
|
|
|
iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-01-09 21:29:09 -08:00
|
|
|
struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
|
|
|
|
|
struct iris_rasterizer_state *new_cso = state;
|
|
|
|
|
|
2018-01-10 00:36:44 -08:00
|
|
|
if (new_cso) {
|
2018-01-09 23:14:10 -08:00
|
|
|
/* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
|
2018-06-09 00:01:09 -07:00
|
|
|
if (cso_changed_memcmp(line_stipple))
|
2018-01-09 23:14:10 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
|
2018-01-09 21:29:09 -08:00
|
|
|
|
2018-06-09 00:01:09 -07:00
|
|
|
if (cso_changed(half_pixel_center))
|
2018-01-09 23:14:10 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
|
2018-06-09 00:01:09 -07:00
|
|
|
|
|
|
|
|
if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_WM;
|
2018-06-29 12:58:31 -07:00
|
|
|
|
2018-12-03 02:59:08 -08:00
|
|
|
if (cso_changed(rasterizer_discard))
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
|
|
|
|
|
|
|
|
|
|
if (cso_changed(flatshade_first))
|
2018-06-29 12:58:31 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
|
2018-07-14 01:29:33 -07:00
|
|
|
|
|
|
|
|
if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
|
|
|
|
|
cso_changed(clip_halfz))
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
|
2018-07-23 15:29:00 -07:00
|
|
|
|
2018-12-02 15:52:46 -08:00
|
|
|
if (cso_changed(sprite_coord_enable) ||
|
|
|
|
|
cso_changed(sprite_coord_mode) ||
|
|
|
|
|
cso_changed(light_twoside))
|
2018-07-23 15:29:00 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_SBE;
|
2019-04-18 13:21:56 -04:00
|
|
|
|
|
|
|
|
if (cso_changed(conservative_rasterization))
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
|
2018-01-09 23:13:16 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-09 21:29:09 -08:00
|
|
|
ice->state.cso_rast = new_cso;
|
2018-01-09 11:44:04 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_RASTER;
|
2018-06-15 16:22:58 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_CLIP;
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |=
|
|
|
|
|
ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
|
2018-01-09 11:44:04 -08:00
|
|
|
}
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
/**
|
|
|
|
|
* Return true if the given wrap mode requires the border color to exist.
|
2018-07-30 23:49:34 -07:00
|
|
|
*
|
|
|
|
|
* (We can skip uploading it if the sampler isn't going to use it.)
|
2017-11-23 23:15:14 -08:00
|
|
|
*/
|
|
|
|
|
static bool
|
|
|
|
|
wrap_mode_needs_border_color(unsigned wrap_mode)
|
|
|
|
|
{
|
|
|
|
|
return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Gallium CSO for sampler state.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
struct iris_sampler_state {
|
2018-10-01 18:33:17 -07:00
|
|
|
union pipe_color_union border_color;
|
2017-11-23 23:15:14 -08:00
|
|
|
bool needs_border_color;
|
|
|
|
|
|
|
|
|
|
uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
|
2021-10-19 17:52:06 -07:00
|
|
|
|
|
|
|
|
#if GFX_VERx10 == 125
|
|
|
|
|
/* Sampler state structure to use for 3D textures in order to
|
|
|
|
|
* implement Wa_14014414195.
|
|
|
|
|
*/
|
|
|
|
|
uint32_t sampler_state_3d[GENX(SAMPLER_STATE_length)];
|
|
|
|
|
#endif
|
2017-11-23 23:15:14 -08:00
|
|
|
};
|
|
|
|
|
|
2021-10-19 17:52:06 -07:00
|
|
|
static void
|
|
|
|
|
fill_sampler_state(uint32_t *sampler_state,
|
|
|
|
|
const struct pipe_sampler_state *state,
|
|
|
|
|
unsigned max_anisotropy)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
2018-08-23 01:49:49 -07:00
|
|
|
float min_lod = state->min_lod;
|
|
|
|
|
unsigned mag_img_filter = state->mag_img_filter;
|
|
|
|
|
|
|
|
|
|
// XXX: explain this code ported from ilo...I don't get it at all...
|
|
|
|
|
if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
|
|
|
|
|
state->min_lod > 0.0f) {
|
|
|
|
|
min_lod = 0.0f;
|
|
|
|
|
mag_img_filter = state->min_img_filter;
|
|
|
|
|
}
|
|
|
|
|
|
2021-10-19 17:52:06 -07:00
|
|
|
iris_pack_state(GENX(SAMPLER_STATE), sampler_state, samp) {
|
|
|
|
|
samp.TCXAddressControlMode = translate_wrap(state->wrap_s);
|
|
|
|
|
samp.TCYAddressControlMode = translate_wrap(state->wrap_t);
|
|
|
|
|
samp.TCZAddressControlMode = translate_wrap(state->wrap_r);
|
2017-11-23 23:15:14 -08:00
|
|
|
samp.CubeSurfaceControlMode = state->seamless_cube_map;
|
|
|
|
|
samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
|
|
|
|
|
samp.MinModeFilter = state->min_img_filter;
|
2018-08-23 01:49:49 -07:00
|
|
|
samp.MagModeFilter = mag_img_filter;
|
2017-11-23 23:15:14 -08:00
|
|
|
samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
|
|
|
|
|
samp.MaximumAnisotropy = RATIO21;
|
|
|
|
|
|
2021-10-19 17:52:06 -07:00
|
|
|
if (max_anisotropy >= 2) {
|
2017-11-23 23:15:14 -08:00
|
|
|
if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
|
|
|
|
|
samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
|
|
|
|
|
samp.AnisotropicAlgorithm = EWAApproximation;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
|
|
|
|
|
samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
|
|
|
|
|
|
|
|
|
|
samp.MaximumAnisotropy =
|
2021-10-19 17:52:06 -07:00
|
|
|
MIN2((max_anisotropy - 2) / 2, RATIO161);
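/* Descriptive note (added): (max_anisotropy - 2) / 2 above maps the API's
 * 2..16 range onto the hardware RATIO21..RATIO161 encodings (one enum step
 * per additional 2:1 of anisotropy), clamped at 16:1.
 */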
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Set address rounding bits if not using nearest filtering. */
|
|
|
|
|
if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
|
|
|
|
|
samp.UAddressMinFilterRoundingEnable = true;
|
|
|
|
|
samp.VAddressMinFilterRoundingEnable = true;
|
|
|
|
|
samp.RAddressMinFilterRoundingEnable = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
|
|
|
|
|
samp.UAddressMagFilterRoundingEnable = true;
|
|
|
|
|
samp.VAddressMagFilterRoundingEnable = true;
|
|
|
|
|
samp.RAddressMagFilterRoundingEnable = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
|
|
|
|
|
samp.ShadowFunction = translate_shadow_func(state->compare_func);
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
const float hw_max_lod = GFX_VER >= 7 ? 14 : 13;
|
2017-11-23 23:15:14 -08:00
|
|
|
|
|
|
|
|
samp.LODPreClampMode = CLAMP_MODE_OGL;
|
2018-08-23 01:49:49 -07:00
|
|
|
samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
|
2017-11-23 23:15:14 -08:00
|
|
|
samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
|
|
|
|
|
samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
|
|
|
|
|
|
2018-06-28 02:25:25 -07:00
|
|
|
/* .BorderColorPointer is filled in by iris_bind_sampler_states. */
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
2021-10-19 17:52:06 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* The pipe->create_sampler_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* We fill out SAMPLER_STATE (except for the border color pointer), and
|
|
|
|
|
* store that on the CPU. It doesn't make sense to upload it to a GPU
|
|
|
|
|
* buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
|
|
|
|
|
* all bound sampler states to be in contiguous memory.
|
|
|
|
|
*/
|
|
|
|
|
static void *
|
|
|
|
|
iris_create_sampler_state(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_sampler_state *state)
|
|
|
|
|
{
|
|
|
|
|
UNUSED struct iris_screen *screen = (void *)ctx->screen;
|
|
|
|
|
UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
|
|
|
|
|
struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
|
|
|
|
|
|
|
|
|
|
if (!cso)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
|
|
|
|
|
STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
|
|
|
|
|
|
|
|
|
|
unsigned wrap_s = translate_wrap(state->wrap_s);
|
|
|
|
|
unsigned wrap_t = translate_wrap(state->wrap_t);
|
|
|
|
|
unsigned wrap_r = translate_wrap(state->wrap_r);
|
|
|
|
|
|
|
|
|
|
memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
|
|
|
|
|
|
|
|
|
|
cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
|
|
|
|
|
wrap_mode_needs_border_color(wrap_t) ||
|
|
|
|
|
wrap_mode_needs_border_color(wrap_r);
|
|
|
|
|
|
|
|
|
|
fill_sampler_state(cso->sampler_state, state, state->max_anisotropy);
|
|
|
|
|
|
|
|
|
|
#if GFX_VERx10 == 125
|
|
|
|
|
/* Fill an extra sampler state structure with anisotropic filtering
|
|
|
|
|
* disabled, which is used to implement Wa_14014414195.
|
|
|
|
|
*/
|
|
|
|
|
fill_sampler_state(cso->sampler_state_3d, state, 0);
|
|
|
|
|
#endif
|
2017-11-23 23:15:14 -08:00
|
|
|
|
|
|
|
|
return cso;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->bind_sampler_states() driver hook.
|
|
|
|
|
*/
|
2018-01-11 22:18:54 -08:00
|
|
|
static void
|
|
|
|
|
iris_bind_sampler_states(struct pipe_context *ctx,
|
|
|
|
|
enum pipe_shader_type p_stage,
|
|
|
|
|
unsigned start, unsigned count,
|
|
|
|
|
void **states)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
gl_shader_stage stage = stage_from_pipe(p_stage);
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2018-01-11 22:18:54 -08:00
|
|
|
|
|
|
|
|
assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
|
2018-06-28 02:25:25 -07:00
|
|
|
|
2019-09-07 22:30:02 -07:00
|
|
|
bool dirty = false;
|
|
|
|
|
|
2018-06-28 02:25:25 -07:00
|
|
|
for (int i = 0; i < count; i++) {
|
2021-09-27 18:52:09 -05:00
|
|
|
struct iris_sampler_state *state = states ? states[i] : NULL;
|
|
|
|
|
if (shs->samplers[start + i] != state) {
|
|
|
|
|
shs->samplers[start + i] = state;
|
2019-09-07 22:30:02 -07:00
|
|
|
dirty = true;
|
|
|
|
|
}
|
2018-06-28 02:25:25 -07:00
|
|
|
}
|
2018-01-11 22:18:54 -08:00
|
|
|
|
2019-09-07 22:30:02 -07:00
|
|
|
if (dirty)
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
|
2018-12-04 15:34:30 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Upload the sampler states into a contiguous area of GPU memory, for
|
|
|
|
|
* 3DSTATE_SAMPLER_STATE_POINTERS_*.
|
|
|
|
|
*
|
|
|
|
|
* Also fill out the border color state pointers.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
|
|
|
|
|
{
|
2021-10-19 17:52:06 -07:00
|
|
|
UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
|
|
|
|
|
UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
|
2018-12-04 15:34:30 -08:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
|
|
|
|
const struct shader_info *info = iris_get_shader_info(ice, stage);
|
|
|
|
|
|
2019-12-03 20:38:14 -05:00
|
|
|
/* We assume gallium frontends will call pipe->bind_sampler_states()
|
2018-12-04 15:34:30 -08:00
|
|
|
* if the program's number of textures changes.
|
|
|
|
|
*/
|
2021-03-08 15:23:31 +10:00
|
|
|
unsigned count = info ? BITSET_LAST_BIT(info->textures_used) : 0;
|
2018-12-04 15:34:30 -08:00
|
|
|
|
|
|
|
|
if (!count)
|
|
|
|
|
return;
|
|
|
|
|
|
2018-06-28 02:25:25 -07:00
|
|
|
/* Assemble the SAMPLER_STATEs into a contiguous table that lives
|
|
|
|
|
* in the dynamic state memory zone, so we can point to it via the
|
|
|
|
|
* 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
|
2018-04-07 00:49:12 -07:00
|
|
|
*/
|
2019-05-22 18:14:38 -07:00
|
|
|
unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
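/* Descriptive note (added): GENX(SAMPLER_STATE_length) is in dwords, hence
 * the multiply by 4 to get the table size in bytes.
 */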
|
2018-08-23 02:15:12 -07:00
|
|
|
uint32_t *map =
|
2019-05-22 18:14:38 -07:00
|
|
|
upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
|
2018-04-19 12:07:44 -07:00
|
|
|
if (unlikely(!map))
|
|
|
|
|
return;
|
2018-04-07 00:49:12 -07:00
|
|
|
|
2018-08-18 23:43:14 -07:00
|
|
|
struct pipe_resource *res = shs->sampler_table.res;
|
2019-10-02 15:09:33 -04:00
|
|
|
struct iris_bo *bo = iris_resource_bo(res);
|
|
|
|
|
|
|
|
|
|
iris_record_state_size(ice->state.sizes,
|
2021-07-19 21:23:18 -07:00
|
|
|
bo->address + shs->sampler_table.offset, size);
|
2018-04-07 00:49:12 -07:00
|
|
|
|
2019-10-02 15:09:33 -04:00
|
|
|
shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
|
2019-05-22 18:14:38 -07:00
|
|
|
|
2018-06-28 02:25:25 -07:00
|
|
|
/* Make sure all land in the same BO */
|
|
|
|
|
iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
|
|
|
|
|
|
2018-12-04 15:34:30 -08:00
|
|
|
ice->state.need_border_colors &= ~(1 << stage);
|
|
|
|
|
|
2018-01-11 22:18:54 -08:00
|
|
|
for (int i = 0; i < count; i++) {
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_sampler_state *state = shs->samplers[i];
|
iris: Properly support alpha and luminance-alpha formats
For texturing, we map alpha formats to the corresponding red format,
as many alpha formats are outright missing, and red is more efficient
when sampling anyway.
When rendering to A8_UNORM, we use that format directly, so the image
gets the shader output's .a/.w channel, rather than the .r/.x channel.
All other A* formats are non-renderable, so we can't do much and just
mark them as unsupported for rendering. Fortunately, GL only requires
rendering to A8_UNORM, so that works out.
According to Andre Heider and Timur Kristóf, this fixes font rendering
in Witcher 1 (via nine). Andre also reported that it fixes Unigine
Heaven (presumably via nine).
v2: Use the same swizzle for both sampler views and "render targets".
BLORP expects the read swizzle, and will take the inverse when
setting up the destination swizzle (and actually applying it in
the shaders). We ignore the format swizzle when setting up normal
rendering SURFACE_STATEs, which is necessary because it would be
an illegal shader channel select combination. Thanks to Jason
Ekstrand for pointing out that BLORP took an inverse swizzle.
Tested-by: Timur Kristóf <timur.kristof@gmail.com>
Tested-by: Andre Heider <a.heider@gmail.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2019-02-21 22:49:40 -08:00
|
|
|
struct iris_sampler_view *tex = shs->textures[i];
|
2018-04-07 00:49:12 -07:00
|
|
|
|
2018-06-28 02:25:25 -07:00
|
|
|
if (!state) {
|
|
|
|
|
memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
|
|
|
|
|
} else {
|
2021-10-19 17:52:06 -07:00
|
|
|
const uint32_t *sampler_state = state->sampler_state;
|
|
|
|
|
#if GFX_VERx10 == 125
|
|
|
|
|
if (tex && tex->res->base.b.target == PIPE_TEXTURE_3D)
|
|
|
|
|
sampler_state = state->sampler_state_3d;
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
if (!state->needs_border_color) {
|
|
|
|
|
memcpy(map, sampler_state, 4 * GENX(SAMPLER_STATE_length));
|
|
|
|
|
} else {
|
|
|
|
|
ice->state.need_border_colors |= 1 << stage;
|
|
|
|
|
|
|
|
|
|
/* We may need to swizzle the border color for format faking.
|
|
|
|
|
* A/LA formats are faked as R/RG with 000R or R00G swizzles.
|
|
|
|
|
* This means we need to move the border color's A channel into
|
|
|
|
|
* the R or G channels so that those read swizzles will move it
|
|
|
|
|
* back into A.
|
|
|
|
|
*/
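            /* For example, an A8_UNORM view is faked as R8 with a 000R read
             * swizzle, so a border color of (0, 0, 0, a) is rewritten below
             * to (a, 0, 0, 0), and the 000R swizzle puts the value back in
             * the alpha channel when sampling.
             */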
            union pipe_color_union *color = &state->border_color;
            union pipe_color_union tmp;
            if (tex) {
               enum pipe_format internal_format = tex->res->internal_format;

               if (util_format_is_alpha(internal_format)) {
                  unsigned char swz[4] = {
                     PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
                     PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
                  };
                  util_format_apply_color_swizzle(&tmp, color, swz, true);
                  color = &tmp;
               } else if (util_format_is_luminance_alpha(internal_format) &&
                          internal_format != PIPE_FORMAT_L8A8_SRGB) {
                  unsigned char swz[4] = {
                     PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
                     PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
                  };
                  util_format_apply_color_swizzle(&tmp, color, swz, true);
                  color = &tmp;
               }
            }

            /* Stream out the border color and merge the pointer. */
            uint32_t offset = iris_upload_border_color(ice, color);

            uint32_t dynamic[GENX(SAMPLER_STATE_length)];
            iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
               dyns.BorderColorPointer = offset;
            }

            for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
               map[j] = sampler_state[j] | dynamic[j];
         }
      }

      map += GENX(SAMPLER_STATE_length);
   }
}

static enum isl_channel_select
fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
{
   switch (swz) {
   case PIPE_SWIZZLE_X: return fmt->swizzle.r;
   case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
   case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
   case PIPE_SWIZZLE_W: return fmt->swizzle.a;
   case PIPE_SWIZZLE_1: return ISL_CHANNEL_SELECT_ONE;
   case PIPE_SWIZZLE_0: return ISL_CHANNEL_SELECT_ZERO;
   default: unreachable("invalid swizzle");
   }
}

static void
fill_buffer_surface_state(struct isl_device *isl_dev,
                          struct iris_resource *res,
                          void *map,
                          enum isl_format format,
                          struct isl_swizzle swizzle,
                          unsigned offset,
                          unsigned size,
                          isl_surf_usage_flags_t usage)
{
   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
   const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;

   /* The ARB_texture_buffer_specification says:
    *
    *   "The number of texels in the buffer texture's texel array is given by
    *
    *      floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *    where <buffer_size> is the size of the buffer object, in basic
    *    machine units and <components> and <base_type> are the element count
    *    and base data type for elements, as specified in Table X.1.  The
    *    number of texels in the texel array is then clamped to the
    *    implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
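   /* For example, with a hypothetical 128-texel limit and a 16-byte
    * R32G32B32A32_FLOAT texel, final_size would be clamped to at most
    * 128 * 16 = 2048 bytes, and ISL's divide by stride_B then yields at
    * most 128 texels, matching the spec's clamp.
    */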
   unsigned final_size =
      MIN3(size, res->bo->size - res->offset - offset,
           IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);

   isl_buffer_fill_state(isl_dev, map,
                         .address = res->bo->address + res->offset + offset,
                         .size_B = final_size,
                         .format = format,
                         .swizzle = swizzle,
                         .stride_B = cpp,
                         .mocs = iris_mocs(res->bo, isl_dev, usage));
}

#define SURFACE_STATE_ALIGNMENT 64

/**
 * Allocate several contiguous SURFACE_STATE structures, one for each
 * supported auxiliary surface mode.  This only allocates the CPU-side
 * copies; they will need to be uploaded later, after they're filled in.
 */
static void
alloc_surface_states(struct iris_surface_state *surf_state,
                     unsigned aux_usages)
{
   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);

   /* If this changes, update this to explicitly align pointers */
   STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);

   assert(aux_usages != 0);

   /* In case we're re-allocating them... */
   free(surf_state->cpu);

   surf_state->num_states = util_bitcount(aux_usages);
   surf_state->cpu = calloc(surf_state->num_states, surf_size);
   surf_state->ref.offset = 0;
   pipe_resource_reference(&surf_state->ref.res, NULL);

   assert(surf_state->cpu);
}

/**
 * Upload the CPU side SURFACE_STATEs into a GPU buffer.
 */
static void
upload_surface_states(struct u_upload_mgr *mgr,
                      struct iris_surface_state *surf_state)
{
   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
   const unsigned bytes = surf_state->num_states * surf_size;

   void *map =
      upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);

   surf_state->ref.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));

   if (map)
      memcpy(map, surf_state->cpu, bytes);
}
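
/* A sketch of how the callers below use these helpers (not an additional
 * API): allocate CPU copies sized for each aux mode, record the backing
 * BO's address, fill one 64-byte state per mode, then upload:
 *
 *    alloc_surface_states(&surf_state, res->aux.possible_usages);
 *    surf_state.bo_address = res->bo->address;
 *    ...fill surf_state.cpu with one RENDER_SURFACE_STATE per aux mode...
 *    upload_surface_states(mgr, &surf_state);
 */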

/**
 * Update resource addresses in a set of SURFACE_STATE descriptors,
 * and re-upload them if necessary.
 */
static bool
update_surface_state_addrs(struct u_upload_mgr *mgr,
                           struct iris_surface_state *surf_state,
                           struct iris_bo *bo)
{
   if (surf_state->bo_address == bo->address)
      return false;

   STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
   STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);

   uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];

   /* First, update the CPU copies.  We assume no other fields exist in
    * the QWord containing Surface Base Address.
    */
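   /* For example, if the old backing BO sat at 0x00800000 and a state held
    * a Surface Base Address of 0x00800000 + 4096, rebasing onto a new BO at
    * 0x01000000 rewrites that field to 0x01000000 + 4096; the offset within
    * the BO is preserved.
    */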
   for (unsigned i = 0; i < surf_state->num_states; i++) {
      *ss_addr = *ss_addr - surf_state->bo_address + bo->address;
      ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
   }

   /* Next, upload the updated copies to a GPU buffer. */
   upload_surface_states(mgr, surf_state);

   surf_state->bo_address = bo->address;

   return true;
}

static void
fill_surface_state(struct isl_device *isl_dev,
                   void *map,
                   struct iris_resource *res,
                   struct isl_surf *surf,
                   struct isl_view *view,
                   unsigned aux_usage,
                   uint32_t extra_main_offset,
                   uint32_t tile_x_sa,
                   uint32_t tile_y_sa)
{
   struct isl_surf_fill_state_info f = {
      .surf = surf,
      .view = view,
      .mocs = iris_mocs(res->bo, isl_dev, view->usage),
      .address = res->bo->address + res->offset + extra_main_offset,
      .x_offset_sa = tile_x_sa,
      .y_offset_sa = tile_y_sa,
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      f.aux_surf = &res->aux.surf;
      f.aux_usage = aux_usage;
      f.clear_color = res->aux.clear_color;

      if (aux_usage == ISL_AUX_USAGE_MC)
         f.mc_format = iris_format_for_usage(isl_dev->info,
                                             res->external_format,
                                             surf->usage).fmt;

      if (res->aux.bo)
         f.aux_address = res->aux.bo->address + res->aux.offset;

      if (res->aux.clear_color_bo) {
         f.clear_address = res->aux.clear_color_bo->address +
                           res->aux.clear_color_offset;
         f.use_clear_address = isl_dev->info->ver > 9;
      }
   }

   isl_surf_fill_state_s(isl_dev, map, &f);
}

/**
 * The pipe->create_sampler_view() driver hook.
 */
static struct pipe_sampler_view *
iris_create_sampler_view(struct pipe_context *ctx,
                         struct pipe_resource *tex,
                         const struct pipe_sampler_view *tmpl)
{
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));

   if (!isv)
      return NULL;

   /* initialize base object */
   isv->base = *tmpl;
   isv->base.context = ctx;
   isv->base.texture = NULL;
   pipe_reference_init(&isv->base.reference, 1);
   pipe_resource_reference(&isv->base.texture, tex);

   if (util_format_is_depth_or_stencil(tmpl->format)) {
      struct iris_resource *zres, *sres;
      const struct util_format_description *desc =
         util_format_description(tmpl->format);

      iris_get_depth_stencil_resources(tex, &zres, &sres);

      tex = util_format_has_depth(desc) ? &zres->base.b : &sres->base.b;
   }

   isv->res = (struct iris_resource *) tex;

   alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);

   isv->surface_state.bo_address = isv->res->bo->address;

   isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;

   if (isv->base.target == PIPE_TEXTURE_CUBE ||
       isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
      usage |= ISL_SURF_USAGE_CUBE_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   isv->clear_color = isv->res->aux.clear_color;

   isv->view = (struct isl_view) {
      .format = fmt.fmt,
      .swizzle = (struct isl_swizzle) {
         .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
         .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
         .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
         .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
      },
      .usage = usage,
   };
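   /* For example, a straight XYZW request on an A8_UNORM view (faked as R8
    * with a 000R format swizzle, per the format-faking scheme noted in the
    * sampler code above) composes here into an isl_view swizzle of
    * ZERO/ZERO/ZERO/RED, so shaders still read the value in .a/.w.
    */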

   void *map = isv->surface_state.cpu;

   /* Fill out SURFACE_STATE for this view. */
   if (tmpl->target != PIPE_BUFFER) {
      isv->view.base_level = tmpl->u.tex.first_level;
      isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;

      if (tmpl->target == PIPE_TEXTURE_3D) {
         isv->view.base_array_layer = 0;
         isv->view.array_len = 1;
      } else {
         isv->view.base_array_layer = tmpl->u.tex.first_layer;
         isv->view.array_len =
            tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
      }
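
      /* One 64-byte SURFACE_STATE is filled per supported auxiliary usage;
       * for instance, if sampler_usages covers ISL_AUX_USAGE_NONE and
       * ISL_AUX_USAGE_CCS_E, the CPU copy holds two states at offsets 0 and
       * 64, so later code can point at whichever matches the resource's
       * current aux state.
       */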
      unsigned aux_modes = isv->res->aux.sampler_usages;
      while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);

         fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
                            &isv->view, aux_usage, 0, 0, 0);

         map += SURFACE_STATE_ALIGNMENT;
      }
   } else {
      fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
                                isv->view.format, isv->view.swizzle,
                                tmpl->u.buf.offset, tmpl->u.buf.size,
                                ISL_SURF_USAGE_TEXTURE_BIT);
   }

   return &isv->base;
}

static void
iris_sampler_view_destroy(struct pipe_context *ctx,
                          struct pipe_sampler_view *state)
{
   struct iris_sampler_view *isv = (void *) state;
   pipe_resource_reference(&state->texture, NULL);
   pipe_resource_reference(&isv->surface_state.ref.res, NULL);
   free(isv->surface_state.cpu);
   free(isv);
}

/**
 * The pipe->create_surface() driver hook.
 *
 * In Gallium nomenclature, "surfaces" are a view of a resource that
 * can be bound as a render target or depth/stencil buffer.
 */
static struct pipe_surface *
iris_create_surface(struct pipe_context *ctx,
                    struct pipe_resource *tex,
                    const struct pipe_surface *tmpl)
{
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   isl_surf_usage_flags_t usage = 0;
   if (tmpl->writable)
      usage = ISL_SURF_USAGE_STORAGE_BIT;
   else if (util_format_is_depth_or_stencil(tmpl->format))
      usage = ISL_SURF_USAGE_DEPTH_BIT;
   else
      usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
       !isl_format_supports_rendering(devinfo, fmt.fmt)) {
      /* Framebuffer validation will reject this invalid case, but it
       * hasn't had the opportunity yet.  In the meantime, we need to
       * avoid hitting ISL asserts about unsupported formats below.
       */
      return NULL;
   }

   struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
   struct pipe_surface *psurf = &surf->base;
   struct iris_resource *res = (struct iris_resource *) tex;

   if (!surf)
      return NULL;

   pipe_reference_init(&psurf->reference, 1);
   pipe_resource_reference(&psurf->texture, tex);
   psurf->context = ctx;
   psurf->format = tmpl->format;
   psurf->width = tex->width0;
   psurf->height = tex->height0;
   psurf->texture = tex;
   psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
   psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
   psurf->u.tex.level = tmpl->u.tex.level;

   uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;

   struct isl_view *view = &surf->view;
   *view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = array_len,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = usage,
   };

#if GFX_VER == 8
   struct isl_view *read_view = &surf->read_view;
   *read_view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = array_len,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_TEXTURE_BIT,
   };

   struct isl_surf read_surf = res->surf;
   uint64_t read_surf_offset_B = 0;
   uint32_t read_surf_tile_x_sa = 0, read_surf_tile_y_sa = 0;
   if (tex->target == PIPE_TEXTURE_3D && array_len == 1) {
      /* The minimum array element field of the surface state structure is
       * ignored by the sampler unit for 3D textures on some hardware.  If
       * the render buffer is a single slice of a 3D texture, create a 2D
       * texture covering that slice.
       *
       * TODO: This only handles the case where we're rendering to a single
       * slice of an array texture.  If we have layered rendering combined
       * with non-coherent FB fetch and a non-zero base_array_layer, then
       * we're going to run into problems.
       *
       * See https://gitlab.freedesktop.org/mesa/mesa/-/issues/4904
       */
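      /* For example, a surface for slice 5 of a 128x128x64 3D texture at
       * level 0 becomes a 2D 128x128 read_surf whose offsets point at that
       * slice, with read_view's base_level and base_array_layer reset to 0
       * below.
       */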
      isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
                              read_view->base_level,
                              0, read_view->base_array_layer,
                              &read_surf, &read_surf_offset_B,
                              &read_surf_tile_x_sa, &read_surf_tile_y_sa);
      read_view->base_level = 0;
      read_view->base_array_layer = 0;
      assert(read_view->array_len == 1);
   } else if (tex->target == PIPE_TEXTURE_1D_ARRAY) {
      /* Convert 1D array textures to 2D arrays because shaders always
       * provide the array index coordinate at the Z component to avoid
       * recompiles when changing the texture target of the framebuffer.
       */
      assert(read_surf.dim_layout == ISL_DIM_LAYOUT_GFX4_2D);
      read_surf.dim = ISL_SURF_DIM_2D;
   }
#endif

   surf->clear_color = res->aux.clear_color;

   /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
   if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
                          ISL_SURF_USAGE_STENCIL_BIT))
      return psurf;

   alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
   surf->surface_state.bo_address = res->bo->address;

#if GFX_VER == 8
   alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
   surf->surface_state_read.bo_address = res->bo->address;
#endif

   if (!isl_format_is_compressed(res->surf.format)) {
      void *map = surf->surface_state.cpu;
      UNUSED void *map_read = surf->surface_state_read.cpu;

      /* This is a normal surface.  Fill out a SURFACE_STATE for each
       * possible auxiliary surface mode and return the pipe_surface.
       */
      unsigned aux_modes = res->aux.possible_usages;
      while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
         fill_surface_state(&screen->isl_dev, map, res, &res->surf,
                            view, aux_usage, 0, 0, 0);
         map += SURFACE_STATE_ALIGNMENT;

#if GFX_VER == 8
         fill_surface_state(&screen->isl_dev, map_read, res,
                            &read_surf, read_view, aux_usage,
                            read_surf_offset_B,
                            read_surf_tile_x_sa, read_surf_tile_y_sa);
         map_read += SURFACE_STATE_ALIGNMENT;
#endif
      }

      return psurf;
   }

   /* The resource has a compressed format, which is not renderable, but we
    * have a renderable view format.  We must be attempting to upload blocks
    * of compressed data via an uncompressed view.
    *
    * In this case, we can assume there are no auxiliary buffers, a single
    * miplevel, and that the resource is single-sampled.  Gallium may try
    * and create an uncompressed view with multiple layers, however.
    */
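   /* For instance, this path is likely hit when BC3/DXT5 blocks are written
    * through an uncompressed 128-bit view such as R32G32B32A32_UINT, where
    * each uncompressed texel stands in for one 16-byte 4x4 block.
    */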
   assert(!isl_format_is_compressed(fmt.fmt));
   assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
   assert(res->surf.samples == 1);
   assert(view->levels == 1);

   struct isl_surf isl_surf;
   uint64_t offset_B = 0;
   uint32_t tile_x_el = 0, tile_y_el = 0;
   bool ok = isl_surf_get_uncompressed_surf(&screen->isl_dev, &res->surf,
                                            view, &isl_surf, view,
                                            &offset_B, &tile_x_el, &tile_y_el);
   if (!ok) {
      free(surf);
      return NULL;
   }

   psurf->width = isl_surf.logical_level0_px.width;
   psurf->height = isl_surf.logical_level0_px.height;

   struct isl_surf_fill_state_info f = {
      .surf = &isl_surf,
      .view = view,
      .mocs = iris_mocs(res->bo, &screen->isl_dev,
                        ISL_SURF_USAGE_RENDER_TARGET_BIT),
      .address = res->bo->address + offset_B,
      .x_offset_sa = tile_x_el, /* Single-sampled, so el == sa */
      .y_offset_sa = tile_y_el, /* Single-sampled, so el == sa */
   };

   isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);

   return psurf;
}

#if GFX_VER < 9
static void
fill_default_image_param(struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
fill_buffer_image_param(struct brw_image_param *param,
                        enum pipe_format pfmt,
                        unsigned size)
{
   const unsigned cpp = util_format_get_blocksize(pfmt);

   fill_default_image_param(param);
   param->size[0] = size / cpp;
   param->stride[0] = cpp;
}
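
/* As an illustration: a 4096-byte PIPE_FORMAT_R32_FLOAT buffer image has a
 * 4-byte block size, so the params above end up with size[0] = 1024 texels
 * and stride[0] = 4 bytes.
 */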
#else
#define isl_surf_fill_image_param(x, ...)
#define fill_default_image_param(x, ...)
#define fill_buffer_image_param(x, ...)
#endif

/**
 * The pipe->set_shader_images() driver hook.
 */
static void
iris_set_shader_images(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start_slot, unsigned count,
                       unsigned unbind_num_trailing_slots,
                       const struct pipe_image_view *p_images)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];
#if GFX_VER == 8
   struct iris_genx_state *genx = ice->state.genx;
   struct brw_image_param *image_params = genx->shaders[stage].image_param;
#endif

   shs->bound_image_views &=
      ~u_bit_consecutive(start_slot, count + unbind_num_trailing_slots);

   for (unsigned i = 0; i < count; i++) {
      struct iris_image_view *iv = &shs->image[start_slot + i];

      if (p_images && p_images[i].resource) {
         const struct pipe_image_view *img = &p_images[i];
         struct iris_resource *res = (void *) img->resource;

         util_copy_image_view(&iv->base, img);

         shs->bound_image_views |= 1 << (start_slot + i);

         res->bind_history |= PIPE_BIND_SHADER_IMAGE;
         res->bind_stages |= 1 << stage;

         enum isl_format isl_fmt = iris_image_view_get_format(ice, img);

         /* Render compression with images supported on gfx12+ only. */
         unsigned aux_usages = GFX_VER >= 12 ? res->aux.possible_usages :
                               1 << ISL_AUX_USAGE_NONE;

         alloc_surface_states(&iv->surface_state, aux_usages);
         iv->surface_state.bo_address = res->bo->address;

         void *map = iv->surface_state.cpu;

         if (res->base.b.target != PIPE_BUFFER) {
            struct isl_view view = {
               .format = isl_fmt,
               .base_level = img->u.tex.level,
               .levels = 1,
               .base_array_layer = img->u.tex.first_layer,
               .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            /* If using untyped fallback. */
            if (isl_fmt == ISL_FORMAT_RAW) {
               fill_buffer_surface_state(&screen->isl_dev, res, map,
                                         isl_fmt, ISL_SWIZZLE_IDENTITY,
                                         0, res->bo->size,
                                         ISL_SURF_USAGE_STORAGE_BIT);
            } else {
               unsigned aux_modes = aux_usages;
               while (aux_modes) {
                  enum isl_aux_usage usage = u_bit_scan(&aux_modes);

                  fill_surface_state(&screen->isl_dev, map, res, &res->surf,
                                     &view, usage, 0, 0, 0);

                  map += SURFACE_STATE_ALIGNMENT;
               }
            }

            isl_surf_fill_image_param(&screen->isl_dev,
                                      &image_params[start_slot + i],
                                      &res->surf, &view);
         } else {
            util_range_add(&res->base.b, &res->valid_buffer_range,
                           img->u.buf.offset,
                           img->u.buf.offset + img->u.buf.size);

            fill_buffer_surface_state(&screen->isl_dev, res, map,
                                      isl_fmt, ISL_SWIZZLE_IDENTITY,
                                      img->u.buf.offset, img->u.buf.size,
                                      ISL_SURF_USAGE_STORAGE_BIT);
            fill_buffer_image_param(&image_params[start_slot + i],
                                    img->format, img->u.buf.size);
         }

         upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
      } else {
         pipe_resource_reference(&iv->base.resource, NULL);
         pipe_resource_reference(&iv->surface_state.ref.res, NULL);
         fill_default_image_param(&image_params[start_slot + i]);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
   ice->state.dirty |=
      stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
                                   : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

   /* Broadwell also needs brw_image_params re-uploaded */
   if (GFX_VER < 9) {
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
      shs->sysvals_need_upload = true;
   }
|
2020-12-21 03:01:34 -05:00
|
|
|
|
|
|
|
|
if (unbind_num_trailing_slots) {
|
|
|
|
|
iris_set_shader_images(ctx, p_stage, start_slot + count,
|
|
|
|
|
unbind_num_trailing_slots, 0, NULL);
|
|
|
|
|
}
|
2018-08-30 15:45:36 -07:00
|
|
|
}
|
|
|
|
|
|
2021-10-19 17:52:06 -07:00
|
|
|
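/**
* Returns true if the given sampler view refers to a 3D texture.
*
* Used on Gfx12.5 to detect when a texture slot switches between 3D and
* non-3D targets, in which case iris_set_sampler_views flags the stage's
* SAMPLER_STATEs for re-emission.
*/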
UNUSED static bool
|
|
|
|
|
is_sampler_view_3d(const struct iris_sampler_view *view)
|
|
|
|
|
{
|
|
|
|
|
return view && view->res->base.b.target == PIPE_TEXTURE_3D;
|
|
|
|
|
}
|
2018-08-30 15:45:36 -07:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_sampler_views() driver hook.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_sampler_views(struct pipe_context *ctx,
|
2018-04-07 06:35:51 -07:00
|
|
|
enum pipe_shader_type p_stage,
|
2017-11-23 23:15:14 -08:00
|
|
|
unsigned start, unsigned count,
|
2020-12-21 03:01:34 -05:00
|
|
|
unsigned unbind_num_trailing_slots,
|
2021-06-06 02:23:31 -04:00
|
|
|
bool take_ownership,
|
2017-11-23 23:15:14 -08:00
|
|
|
struct pipe_sampler_view **views)
|
|
|
|
|
{
|
2018-04-07 06:35:51 -07:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2021-10-19 17:52:06 -07:00
|
|
|
UNUSED struct iris_screen *screen = (void *) ctx->screen;
|
|
|
|
|
UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
|
2018-04-07 06:35:51 -07:00
|
|
|
gl_shader_stage stage = stage_from_pipe(p_stage);
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2020-12-21 03:01:34 -05:00
|
|
|
unsigned i;
|
2018-04-07 06:35:51 -07:00
|
|
|
|
2021-01-27 23:27:02 -08:00
|
|
|
shs->bound_sampler_views &=
|
|
|
|
|
~u_bit_consecutive(start, count + unbind_num_trailing_slots);
|
2018-12-02 23:07:27 -08:00
|
|
|
|
2020-12-21 03:01:34 -05:00
|
|
|
for (i = 0; i < count; i++) {
|
2019-03-14 15:12:28 -04:00
|
|
|
struct pipe_sampler_view *pview = views ? views[i] : NULL;
|
2021-10-19 17:52:06 -07:00
|
|
|
struct iris_sampler_view *view = (void *) pview;
|
|
|
|
|
|
|
|
|
|
#if GFX_VERx10 == 125
|
|
|
|
|
if (is_sampler_view_3d(shs->textures[start + i]) !=
|
|
|
|
|
is_sampler_view_3d(view))
|
|
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
|
|
|
|
|
#endif
|
2021-06-06 02:23:31 -04:00
|
|
|
|
|
|
|
|
if (take_ownership) {
|
|
|
|
|
pipe_sampler_view_reference((struct pipe_sampler_view **)
|
|
|
|
|
&shs->textures[start + i], NULL);
|
|
|
|
|
shs->textures[start + i] = (struct iris_sampler_view *)pview;
|
|
|
|
|
} else {
|
|
|
|
|
pipe_sampler_view_reference((struct pipe_sampler_view **)
|
|
|
|
|
&shs->textures[start + i], pview);
|
|
|
|
|
}
|
2018-12-02 23:17:44 -08:00
|
|
|
if (view) {
|
2018-11-21 00:38:49 -08:00
|
|
|
view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
|
2019-09-10 11:14:57 -07:00
|
|
|
view->res->bind_stages |= 1 << stage;
|
|
|
|
|
|
2018-12-02 23:17:44 -08:00
|
|
|
shs->bound_sampler_views |= 1 << (start + i);
|
2019-11-15 15:18:06 -08:00
|
|
|
|
|
|
|
|
update_surface_state_addrs(ice->state.surface_uploader,
|
|
|
|
|
&view->surface_state, view->res->bo);
|
2018-12-02 23:17:44 -08:00
|
|
|
}
|
2018-04-19 12:07:44 -07:00
|
|
|
}
|
2020-12-21 03:01:34 -05:00
|
|
|
for (; i < count + unbind_num_trailing_slots; i++) {
|
|
|
|
|
pipe_sampler_view_reference((struct pipe_sampler_view **)
|
|
|
|
|
&shs->textures[start + i], NULL);
|
|
|
|
|
}
|
2018-04-07 06:35:51 -07:00
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
|
2019-03-11 00:04:56 -07:00
|
|
|
ice->state.dirty |=
|
|
|
|
|
stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
|
|
|
|
|
: IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-10-25 17:51:04 -05:00
|
|
|
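/**
* The pipe->set_compute_resources() driver hook.
*
* Iris doesn't bind compute resources this way, so we only expect to be
* called with a count of zero.
*/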
static void
|
|
|
|
|
iris_set_compute_resources(struct pipe_context *ctx,
|
|
|
|
|
unsigned start, unsigned count,
|
|
|
|
|
struct pipe_surface **resources)
|
|
|
|
|
{
|
|
|
|
|
assert(count == 0);
|
|
|
|
|
}
|
|
|
|
|
|
2018-10-25 17:53:23 -05:00
|
|
|
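/**
* The pipe->set_global_binding() driver hook.
*
* Binds resources as globally addressable memory for compute kernels, and
* writes each buffer's GPU address into the caller-provided handle.
*/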
static void
|
|
|
|
|
iris_set_global_binding(struct pipe_context *ctx,
|
|
|
|
|
unsigned start_slot, unsigned count,
|
|
|
|
|
struct pipe_resource **resources,
|
|
|
|
|
uint32_t **handles)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
|
|
|
|
|
assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
|
|
|
|
|
for (unsigned i = 0; i < count; i++) {
|
|
|
|
|
if (resources && resources[i]) {
|
|
|
|
|
pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
|
|
|
|
|
resources[i]);
|
|
|
|
|
struct iris_resource *res = (void *) resources[i];
|
2021-07-19 21:23:18 -07:00
|
|
|
uint64_t addr = res->bo->address;
|
2018-10-25 17:53:23 -05:00
|
|
|
memcpy(handles[i], &addr, sizeof(addr));
|
|
|
|
|
} else {
|
|
|
|
|
pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
|
|
|
|
|
NULL);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-21 12:22:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_tess_state() driver hook.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
iris_set_tess_state(struct pipe_context *ctx,
|
|
|
|
|
const float default_outer_level[4],
|
|
|
|
|
const float default_inner_level[2])
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2019-03-06 20:56:37 -08:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
|
2018-09-21 12:22:34 -07:00
|
|
|
|
|
|
|
|
memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
|
|
|
|
|
memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
|
|
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
|
2019-06-14 14:03:28 +02:00
|
|
|
shs->sysvals_need_upload = true;
|
2018-09-21 12:22:34 -07:00
|
|
|
}
|
|
|
|
|
|
2021-08-13 02:29:56 -04:00
|
|
|
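/**
* The pipe->set_patch_vertices() driver hook.
*/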
static void
|
|
|
|
|
iris_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
|
|
|
|
|
ice->state.patch_vertices = patch_vertices;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
static void
|
|
|
|
|
iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
|
|
|
|
|
{
|
|
|
|
|
struct iris_surface *surf = (void *) p_surf;
|
|
|
|
|
pipe_resource_reference(&p_surf->texture, NULL);
|
2019-11-14 16:06:10 -08:00
|
|
|
pipe_resource_reference(&surf->surface_state.ref.res, NULL);
|
|
|
|
|
pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
|
2019-11-14 17:17:43 -08:00
|
|
|
free(surf->surface_state.cpu);
|
2018-07-30 23:49:34 -07:00
|
|
|
free(surf);
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_clip_state(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_clip_state *state)
|
|
|
|
|
{
|
2018-11-09 02:11:16 -08:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
|
2019-06-27 15:06:30 +10:00
|
|
|
struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
|
2019-06-28 22:25:57 +10:00
|
|
|
struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
|
2018-11-09 02:11:16 -08:00
|
|
|
|
|
|
|
|
memcpy(&ice->state.clip_planes, state, sizeof(*state));
|
|
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_GS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_TES;
|
2019-06-14 14:03:28 +02:00
|
|
|
shs->sysvals_need_upload = true;
|
2019-06-27 15:06:30 +10:00
|
|
|
gshs->sysvals_need_upload = true;
|
2019-06-28 22:25:57 +10:00
|
|
|
tshs->sysvals_need_upload = true;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_polygon_stipple() driver hook.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_polygon_stipple(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_poly_stipple *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
memcpy(&ice->state.poly_stipple, state, sizeof(*state));
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_sample_mask() driver hook.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
2018-01-10 00:19:29 -08:00
|
|
|
iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
2018-01-10 00:19:29 -08:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* We only support 16x MSAA, so we have 16 bits of sample mask.
|
|
|
|
|
* st/mesa may pass us 0xffffffff though, meaning "enable all samples".
|
|
|
|
|
*/
|
2018-07-27 16:02:09 -07:00
|
|
|
ice->state.sample_mask = sample_mask & 0xffff;
|
2018-01-10 00:19:29 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_scissor_states() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This corresponds to our SCISSOR_RECT state structures. It's an
|
|
|
|
|
* exact match, so we just store them, and memcpy them out later.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_scissor_states(struct pipe_context *ctx,
|
|
|
|
|
unsigned start_slot,
|
|
|
|
|
unsigned num_scissors,
|
2018-10-23 01:36:26 -07:00
|
|
|
const struct pipe_scissor_state *rects)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
|
2018-01-21 21:23:48 -08:00
|
|
|
for (unsigned i = 0; i < num_scissors; i++) {
|
2018-10-23 01:36:26 -07:00
|
|
|
if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
|
|
|
|
|
/* If the scissor was out of bounds and got clamped to 0 width/height
|
|
|
|
|
* at the bounds, the subtraction of 1 from maximums could produce a
|
|
|
|
|
* negative number and thus not clip anything. Instead, just provide
|
|
|
|
|
* a min > max scissor inside the bounds, which produces the expected
|
|
|
|
|
* no rendering.
|
|
|
|
|
*/
|
|
|
|
|
ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
|
|
|
|
|
.minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
|
|
|
|
|
};
|
|
|
|
|
} else {
|
|
|
|
|
ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
|
|
|
|
|
.minx = rects[i].minx, .miny = rects[i].miny,
|
|
|
|
|
.maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
|
|
|
|
|
};
|
|
|
|
|
}
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_stencil_ref() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_stencil_ref(struct pipe_context *ctx,
|
2020-12-03 11:36:53 -05:00
|
|
|
const struct pipe_stencil_ref state)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2020-12-03 11:36:53 -05:00
|
|
|
memcpy(&ice->state.stencil_ref, &state, sizeof(state));
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER >= 12)
|
2020-02-04 10:49:59 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
|
2021-03-16 10:14:30 -07:00
|
|
|
else if (GFX_VER >= 9)
|
2018-11-07 14:23:27 +10:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
|
2020-02-04 10:49:59 -08:00
|
|
|
else
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2017-12-23 14:33:04 -08:00
|
|
|
static float
|
2018-01-30 17:36:24 -08:00
|
|
|
viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
|
2017-12-23 14:33:04 -08:00
|
|
|
{
|
2018-01-30 17:36:24 -08:00
|
|
|
return copysignf(state->scale[axis], sign) + state->translate[axis];
|
2017-12-23 14:33:04 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_viewport_states() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
|
|
|
|
|
* the guardband yet, as we need the framebuffer dimensions, but we can
|
|
|
|
|
* at least fill out the rest.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_viewport_states(struct pipe_context *ctx,
|
|
|
|
|
unsigned start_slot,
|
2018-06-20 15:45:48 -07:00
|
|
|
unsigned count,
|
2018-06-20 16:03:43 -07:00
|
|
|
const struct pipe_viewport_state *states)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
2018-01-09 11:58:28 -08:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-12-17 15:17:54 -08:00
|
|
|
|
2018-12-03 02:02:49 -08:00
|
|
|
memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
|
2018-01-09 11:58:28 -08:00
|
|
|
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
|
2018-07-14 01:29:33 -07:00
|
|
|
|
|
|
|
|
if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
|
|
|
|
|
!ice->state.cso_rast->depth_clip_far))
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_framebuffer_state() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* Sets the current draw FBO, including color render targets, depth,
|
|
|
|
|
* and stencil buffers.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_framebuffer_state(struct pipe_context *ctx,
|
|
|
|
|
const struct pipe_framebuffer_state *state)
|
|
|
|
|
{
|
2018-01-09 14:34:15 -08:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-05-08 23:52:07 -07:00
|
|
|
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
|
|
|
|
|
struct isl_device *isl_dev = &screen->isl_dev;
|
2018-01-09 23:13:16 -08:00
|
|
|
struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
|
2018-08-03 16:18:09 -07:00
|
|
|
struct iris_resource *zres;
|
|
|
|
|
struct iris_resource *stencil_res;
|
2018-01-09 14:34:15 -08:00
|
|
|
|
2018-07-24 10:59:10 -07:00
|
|
|
unsigned samples = util_framebuffer_get_num_samples(state);
|
2018-12-23 18:22:44 -08:00
|
|
|
unsigned layers = util_framebuffer_get_num_layers(state);
|
2018-07-24 10:59:10 -07:00
|
|
|
|
|
|
|
|
if (cso->samples != samples) {
|
2018-01-09 23:13:16 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
|
2019-07-07 17:14:15 -07:00
|
|
|
|
|
|
|
|
/* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER >= 9 && (cso->samples == 16 || samples == 16))
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
|
2018-01-09 23:13:16 -08:00
|
|
|
}
|
2018-01-09 14:34:15 -08:00
|
|
|
|
2018-01-30 01:50:44 -08:00
|
|
|
if (cso->nr_cbufs != state->nr_cbufs) {
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-23 18:22:44 -08:00
|
|
|
if ((cso->layers == 0) != (layers == 0)) {
|
2018-06-15 16:22:58 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_CLIP;
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-03 02:02:49 -08:00
|
|
|
if (cso->width != state->width || cso->height != state->height) {
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-30 18:49:47 -07:00
|
|
|
if (cso->zsbuf || state->zsbuf) {
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-21 00:32:04 -07:00
|
|
|
util_copy_framebuffer_state(cso, state);
|
2018-07-24 10:59:10 -07:00
|
|
|
cso->samples = samples;
|
2018-12-23 18:22:44 -08:00
|
|
|
cso->layers = layers;
|
2018-01-09 14:34:15 -08:00
|
|
|
|
2018-07-01 22:13:07 -07:00
|
|
|
struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
|
2018-05-08 23:52:07 -07:00
|
|
|
|
|
|
|
|
struct isl_view view = {
|
|
|
|
|
.base_level = 0,
|
|
|
|
|
.levels = 1,
|
|
|
|
|
.base_array_layer = 0,
|
|
|
|
|
.array_len = 1,
|
|
|
|
|
.swizzle = ISL_SWIZZLE_IDENTITY,
|
|
|
|
|
};
|
|
|
|
|
|
2021-10-19 05:27:04 -07:00
|
|
|
struct isl_depth_stencil_hiz_emit_info info = {
|
|
|
|
|
.view = &view,
|
|
|
|
|
.mocs = iris_mocs(NULL, isl_dev, ISL_SURF_USAGE_DEPTH_BIT),
|
|
|
|
|
};
|
2018-05-08 23:52:07 -07:00
|
|
|
|
2018-08-03 16:18:09 -07:00
|
|
|
if (cso->zsbuf) {
|
|
|
|
|
iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
|
|
|
|
|
&stencil_res);
|
2018-05-08 23:52:07 -07:00
|
|
|
|
|
|
|
|
view.base_level = cso->zsbuf->u.tex.level;
|
|
|
|
|
view.base_array_layer = cso->zsbuf->u.tex.first_layer;
|
|
|
|
|
view.array_len =
|
|
|
|
|
cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
|
|
|
|
|
|
2018-08-03 16:18:09 -07:00
|
|
|
if (zres) {
|
|
|
|
|
view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
|
2018-05-08 23:52:07 -07:00
|
|
|
|
2018-08-03 16:18:09 -07:00
|
|
|
info.depth_surf = &zres->surf;
|
2021-07-19 21:23:18 -07:00
|
|
|
info.depth_address = zres->bo->address + zres->offset;
|
2020-10-07 07:44:56 -07:00
|
|
|
info.mocs = iris_mocs(zres->bo, isl_dev, view.usage);
|
2018-08-03 16:18:09 -07:00
|
|
|
|
|
|
|
|
view.format = zres->surf.format;
|
2018-12-10 00:35:48 -08:00
|
|
|
|
|
|
|
|
if (iris_resource_level_has_hiz(zres, view.base_level)) {
|
2019-08-09 11:08:26 -07:00
|
|
|
info.hiz_usage = zres->aux.usage;
|
2018-12-10 00:35:48 -08:00
|
|
|
info.hiz_surf = &zres->aux.surf;
|
2021-07-19 21:23:18 -07:00
|
|
|
info.hiz_address = zres->aux.bo->address + zres->aux.offset;
|
2018-12-10 00:35:48 -08:00
|
|
|
}
|
2020-12-16 14:57:14 -08:00
|
|
|
|
|
|
|
|
ice->state.hiz_usage = info.hiz_usage;
|
2018-05-08 23:52:07 -07:00
|
|
|
}
|
|
|
|
|
|
2018-08-03 16:18:09 -07:00
|
|
|
if (stencil_res) {
|
|
|
|
|
view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
|
2019-10-23 16:24:46 -07:00
|
|
|
info.stencil_aux_usage = stencil_res->aux.usage;
|
2018-08-03 16:18:09 -07:00
|
|
|
info.stencil_surf = &stencil_res->surf;
|
2021-07-19 21:23:18 -07:00
|
|
|
info.stencil_address = stencil_res->bo->address + stencil_res->offset;
|
2018-12-12 00:02:25 -08:00
|
|
|
if (!zres) {
|
2018-08-03 16:18:09 -07:00
|
|
|
view.format = stencil_res->surf.format;
|
2020-10-07 07:44:56 -07:00
|
|
|
info.mocs = iris_mocs(stencil_res->bo, isl_dev, view.usage);
|
2018-12-12 00:02:25 -08:00
|
|
|
}
|
2018-08-03 16:18:09 -07:00
|
|
|
}
|
2018-05-08 23:52:07 -07:00
|
|
|
}
|
2018-01-09 23:30:21 -08:00
|
|
|
|
2018-05-08 23:52:07 -07:00
|
|
|
isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
|
|
|
|
|
|
2018-07-31 10:33:35 +10:00
|
|
|
/* Make a null surface for unbound buffers */
|
|
|
|
|
void *null_surf_map =
|
|
|
|
|
upload_state(ice->state.surface_uploader, &ice->state.null_fb,
|
|
|
|
|
4 * GENX(RENDER_SURFACE_STATE_length), 64);
|
2018-11-05 23:16:14 -08:00
|
|
|
isl_null_fill_state(&screen->isl_dev, null_surf_map,
|
2021-06-07 05:26:05 +10:00
|
|
|
.size = isl_extent3d(MAX2(cso->width, 1),
|
|
|
|
|
MAX2(cso->height, 1),
|
|
|
|
|
cso->layers ? cso->layers : 1));
|
2018-09-11 01:09:27 -07:00
|
|
|
ice->state.null_fb.offset +=
|
|
|
|
|
iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
|
2018-07-31 10:33:35 +10:00
|
|
|
|
2018-06-15 12:33:58 -07:00
|
|
|
/* Render target change */
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
|
2018-07-16 16:21:22 -07:00
|
|
|
|
2019-02-15 11:35:28 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
|
|
|
|
|
|
2019-03-11 00:04:56 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
|
|
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |=
|
|
|
|
|
ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
|
2018-09-18 11:04:44 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 8)
|
2019-09-25 00:31:07 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_constant_buffer() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This uploads any constant data in user buffers, and references
|
|
|
|
|
* any UBO resources containing constant data.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_constant_buffer(struct pipe_context *ctx,
|
2018-02-09 14:21:54 -08:00
|
|
|
enum pipe_shader_type p_stage, unsigned index,
|
2020-12-26 12:01:10 -05:00
|
|
|
bool take_ownership,
|
2018-06-06 02:16:52 -07:00
|
|
|
const struct pipe_constant_buffer *input)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
2018-02-09 14:21:54 -08:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
gl_shader_stage stage = stage_from_pipe(p_stage);
|
2018-08-18 23:39:48 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2019-04-16 23:44:15 -07:00
|
|
|
struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2019-09-14 23:18:20 -07:00
|
|
|
/* TODO: Only do this if the buffer changes? */
|
|
|
|
|
pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
|
|
|
|
|
|
2019-06-14 14:03:28 +02:00
|
|
|
if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
|
2019-04-16 23:01:41 -07:00
|
|
|
shs->bound_cbufs |= 1u << index;
|
|
|
|
|
|
2019-06-14 14:03:28 +02:00
|
|
|
if (input->user_buffer) {
|
|
|
|
|
void *map = NULL;
|
|
|
|
|
pipe_resource_reference(&cbuf->buffer, NULL);
|
|
|
|
|
u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
|
|
|
|
|
&cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
|
2018-06-06 14:37:38 -07:00
|
|
|
|
2019-06-14 14:03:28 +02:00
|
|
|
if (!cbuf->buffer) {
|
|
|
|
|
/* Allocation was unsuccessful - just unbind */
|
2020-12-26 12:01:10 -05:00
|
|
|
iris_set_constant_buffer(ctx, p_stage, index, false, NULL);
|
2019-06-14 14:03:28 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
assert(map);
|
|
|
|
|
memcpy(map, input->user_buffer, input->buffer_size);
|
|
|
|
|
} else if (input->buffer) {
|
2020-05-29 16:12:55 -07:00
|
|
|
if (cbuf->buffer != input->buffer) {
|
2020-05-29 16:36:23 -07:00
|
|
|
ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
|
|
|
|
|
IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
|
2020-05-05 13:05:52 -07:00
|
|
|
shs->dirty_cbufs |= 1u << index;
|
2020-05-29 16:12:55 -07:00
|
|
|
}
|
|
|
|
|
|
2020-12-26 12:01:10 -05:00
|
|
|
if (take_ownership) {
|
|
|
|
|
pipe_resource_reference(&cbuf->buffer, NULL);
|
|
|
|
|
cbuf->buffer = input->buffer;
|
|
|
|
|
} else {
|
|
|
|
|
pipe_resource_reference(&cbuf->buffer, input->buffer);
|
|
|
|
|
}
|
2019-06-14 14:03:28 +02:00
|
|
|
|
|
|
|
|
cbuf->buffer_offset = input->buffer_offset;
|
|
|
|
|
}
|
2018-06-06 14:37:38 -07:00
|
|
|
|
2019-09-10 09:04:20 -07:00
|
|
|
cbuf->buffer_size =
|
|
|
|
|
MIN2(input->buffer_size,
|
|
|
|
|
iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
|
|
|
|
|
|
2019-04-16 23:44:15 -07:00
|
|
|
struct iris_resource *res = (void *) cbuf->buffer;
|
2018-11-21 00:38:49 -08:00
|
|
|
res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
|
2019-09-10 11:14:57 -07:00
|
|
|
res->bind_stages |= 1 << stage;
|
2018-06-06 02:16:52 -07:00
|
|
|
} else {
|
2019-04-16 23:01:41 -07:00
|
|
|
shs->bound_cbufs &= ~(1u << index);
|
2019-04-16 23:44:15 -07:00
|
|
|
pipe_resource_reference(&cbuf->buffer, NULL);
|
2018-06-06 02:16:52 -07:00
|
|
|
}
|
2018-06-15 12:33:58 -07:00
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
|
2018-02-09 14:21:54 -08:00
|
|
|
}
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-11-08 23:10:46 -08:00
|
|
|
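/**
* Upload the constant buffer holding shader system values (clip planes,
* tessellation defaults, patch vertex counts, workgroup sizes, and any
* compute kernel inputs) for the given stage, then fill out the
* corresponding SURFACE_STATE.
*/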
static void
|
2019-06-14 14:03:28 +02:00
|
|
|
upload_sysvals(struct iris_context *ice,
|
2020-08-11 10:30:42 -05:00
|
|
|
gl_shader_stage stage,
|
|
|
|
|
const struct pipe_grid_info *grid)
|
2018-11-08 23:10:46 -08:00
|
|
|
{
|
2019-04-22 11:27:37 -07:00
|
|
|
UNUSED struct iris_genx_state *genx = ice->state.genx;
|
2018-11-08 23:10:46 -08:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2018-11-09 00:51:58 -08:00
|
|
|
|
2019-06-14 14:03:28 +02:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
2020-08-11 10:30:42 -05:00
|
|
|
if (!shader || (shader->num_system_values == 0 &&
|
|
|
|
|
shader->kernel_input_size == 0))
|
2018-11-09 00:51:58 -08:00
|
|
|
return;
|
2018-11-08 23:19:53 -08:00
|
|
|
|
2019-06-14 14:03:28 +02:00
|
|
|
assert(shader->num_cbufs > 0);
|
|
|
|
|
|
|
|
|
|
unsigned sysval_cbuf_index = shader->num_cbufs - 1;
|
|
|
|
|
struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
|
2020-08-11 10:30:42 -05:00
|
|
|
unsigned system_values_start =
|
|
|
|
|
ALIGN(shader->kernel_input_size, sizeof(uint32_t));
|
|
|
|
|
unsigned upload_size = system_values_start +
|
|
|
|
|
shader->num_system_values * sizeof(uint32_t);
|
|
|
|
|
void *map = NULL;
|
2019-06-14 14:03:28 +02:00
|
|
|
|
|
|
|
|
assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
|
2019-04-16 23:44:15 -07:00
|
|
|
u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
|
2020-08-11 10:30:42 -05:00
|
|
|
&cbuf->buffer_offset, &cbuf->buffer, &map);
|
2018-11-08 23:19:53 -08:00
|
|
|
|
2020-08-11 10:30:42 -05:00
|
|
|
if (shader->kernel_input_size > 0)
|
|
|
|
|
memcpy(map, grid->input, shader->kernel_input_size);
|
|
|
|
|
|
|
|
|
|
uint32_t *sysval_map = map + system_values_start;
|
2018-11-09 02:04:23 -08:00
|
|
|
for (int i = 0; i < shader->num_system_values; i++) {
|
|
|
|
|
uint32_t sysval = shader->system_values[i];
|
2018-11-08 23:19:53 -08:00
|
|
|
uint32_t value = 0;
|
|
|
|
|
|
2018-11-30 02:27:07 -08:00
|
|
|
if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2018-11-30 02:27:07 -08:00
|
|
|
unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
|
|
|
|
|
unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
|
2019-04-22 11:27:37 -07:00
|
|
|
struct brw_image_param *param =
|
|
|
|
|
&genx->shaders[stage].image_param[img];
|
2018-11-30 02:27:07 -08:00
|
|
|
|
|
|
|
|
assert(offset < sizeof(struct brw_image_param));
|
|
|
|
|
value = ((uint32_t *) param)[offset];
|
2019-04-22 11:27:37 -07:00
|
|
|
#endif
|
2018-11-30 02:27:07 -08:00
|
|
|
} else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
|
|
|
|
|
value = 0;
|
|
|
|
|
} else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
|
2018-11-09 02:11:16 -08:00
|
|
|
int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
|
|
|
|
|
int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
|
|
|
|
|
value = fui(ice->state.clip_planes.ucp[plane][comp]);
|
2018-12-04 14:11:51 -08:00
|
|
|
} else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
|
|
|
|
|
if (stage == MESA_SHADER_TESS_CTRL) {
|
|
|
|
|
value = ice->state.vertices_per_patch;
|
|
|
|
|
} else {
|
|
|
|
|
assert(stage == MESA_SHADER_TESS_EVAL);
|
|
|
|
|
const struct shader_info *tcs_info =
|
|
|
|
|
iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
|
2019-03-07 20:14:59 -08:00
|
|
|
if (tcs_info)
|
|
|
|
|
value = tcs_info->tess.tcs_vertices_out;
|
|
|
|
|
else
|
|
|
|
|
value = ice->state.vertices_per_patch;
|
2018-12-04 14:11:51 -08:00
|
|
|
}
|
2019-03-06 20:56:37 -08:00
|
|
|
} else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
|
|
|
|
|
sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
|
|
|
|
|
unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
|
|
|
|
|
value = fui(ice->state.default_outer_level[i]);
|
|
|
|
|
} else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
|
|
|
|
|
value = fui(ice->state.default_inner_level[0]);
|
|
|
|
|
} else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
|
|
|
|
|
value = fui(ice->state.default_inner_level[1]);
|
2020-04-28 14:03:47 -07:00
|
|
|
} else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
|
|
|
|
|
sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
|
|
|
|
|
unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
|
|
|
|
|
value = ice->state.last_block[i];
|
2020-10-06 18:06:05 -05:00
|
|
|
} else if (sysval == BRW_PARAM_BUILTIN_WORK_DIM) {
|
|
|
|
|
value = grid->work_dim;
|
2018-11-09 02:11:16 -08:00
|
|
|
} else {
|
|
|
|
|
assert(!"unhandled system value");
|
|
|
|
|
}
|
2018-11-08 23:19:53 -08:00
|
|
|
|
2020-08-11 10:30:42 -05:00
|
|
|
*sysval_map++ = value;
|
2018-11-08 23:19:53 -08:00
|
|
|
}
|
2018-11-08 23:10:46 -08:00
|
|
|
|
2019-04-16 23:44:15 -07:00
|
|
|
cbuf->buffer_size = upload_size;
|
2019-05-28 17:52:58 -05:00
|
|
|
iris_upload_ubo_ssbo_surf_state(ice, cbuf,
|
2020-10-07 07:44:56 -07:00
|
|
|
&shs->constbuf_surf_state[sysval_cbuf_index],
|
|
|
|
|
ISL_SURF_USAGE_CONSTANT_BUFFER_BIT);
|
2019-06-14 14:03:28 +02:00
|
|
|
|
|
|
|
|
shs->sysvals_need_upload = false;
|
2018-11-08 23:10:46 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_shader_buffers() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This binds SSBOs and ABOs. Unfortunately, we need to stream out
|
|
|
|
|
* SURFACE_STATE here, as the buffer offset may change each time.
|
|
|
|
|
*/
|
2018-07-24 15:54:00 -07:00
|
|
|
static void
|
|
|
|
|
iris_set_shader_buffers(struct pipe_context *ctx,
|
|
|
|
|
enum pipe_shader_type p_stage,
|
|
|
|
|
unsigned start_slot, unsigned count,
|
2019-02-27 21:54:47 -05:00
|
|
|
const struct pipe_shader_buffer *buffers,
|
|
|
|
|
unsigned writable_bitmask)
|
2018-07-24 15:54:00 -07:00
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
|
|
|
|
gl_shader_stage stage = stage_from_pipe(p_stage);
|
2018-08-18 23:39:48 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2018-07-24 15:54:00 -07:00
|
|
|
|
2019-04-16 22:54:40 -07:00
|
|
|
unsigned modified_bits = u_bit_consecutive(start_slot, count);
|
|
|
|
|
|
|
|
|
|
shs->bound_ssbos &= ~modified_bits;
|
|
|
|
|
shs->writable_ssbos &= ~modified_bits;
|
|
|
|
|
shs->writable_ssbos |= writable_bitmask << start_slot;
|
|
|
|
|
|
2018-07-24 15:54:00 -07:00
|
|
|
for (unsigned i = 0; i < count; i++) {
|
2018-07-24 20:57:02 -07:00
|
|
|
if (buffers && buffers[i].buffer) {
|
2019-04-16 23:44:15 -07:00
|
|
|
struct iris_resource *res = (void *) buffers[i].buffer;
|
|
|
|
|
struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
|
|
|
|
|
struct iris_state_ref *surf_state =
|
|
|
|
|
&shs->ssbo_surf_state[start_slot + i];
|
2021-02-08 16:39:42 -08:00
|
|
|
pipe_resource_reference(&ssbo->buffer, &res->base.b);
|
2019-04-16 23:44:15 -07:00
|
|
|
ssbo->buffer_offset = buffers[i].buffer_offset;
|
|
|
|
|
ssbo->buffer_size =
|
|
|
|
|
MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
|
2018-07-24 15:54:00 -07:00
|
|
|
|
2019-04-16 22:54:40 -07:00
|
|
|
shs->bound_ssbos |= 1 << (start_slot + i);
|
|
|
|
|
|
2020-10-07 07:44:56 -07:00
|
|
|
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
|
|
|
|
|
|
|
|
|
|
iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, usage);
|
2018-07-24 15:54:00 -07:00
|
|
|
|
2019-04-16 23:44:15 -07:00
|
|
|
res->bind_history |= PIPE_BIND_SHADER_BUFFER;
|
2019-09-10 11:14:57 -07:00
|
|
|
res->bind_stages |= 1 << stage;
|
2019-04-05 11:54:10 -07:00
|
|
|
|
2021-02-08 16:39:42 -08:00
|
|
|
util_range_add(&res->base.b, &res->valid_buffer_range, ssbo->buffer_offset,
|
2019-04-05 11:54:10 -07:00
|
|
|
ssbo->buffer_offset + ssbo->buffer_size);
|
2018-07-24 15:54:00 -07:00
|
|
|
} else {
|
2019-04-16 23:44:15 -07:00
|
|
|
pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
|
|
|
|
|
pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
|
2018-07-24 15:54:00 -07:00
|
|
|
NULL);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-29 16:36:23 -07:00
|
|
|
ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
|
|
|
|
|
IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
|
2018-07-24 15:54:00 -07:00
|
|
|
}
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_delete_state(struct pipe_context *ctx, void *state)
|
|
|
|
|
{
|
|
|
|
|
free(state);
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_vertex_buffers() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_vertex_buffers(struct pipe_context *ctx,
|
|
|
|
|
unsigned start_slot, unsigned count,
|
2020-12-21 03:01:34 -05:00
|
|
|
unsigned unbind_num_trailing_slots,
|
2020-12-26 17:29:45 -05:00
|
|
|
bool take_ownership,
|
2017-11-23 23:15:14 -08:00
|
|
|
const struct pipe_vertex_buffer *buffers)
|
|
|
|
|
{
|
2018-01-10 00:19:29 -08:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2019-11-05 15:08:01 -08:00
|
|
|
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
|
2018-12-04 16:38:14 -08:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
2018-01-21 13:14:49 -08:00
|
|
|
|
2021-01-27 23:27:02 -08:00
|
|
|
ice->state.bound_vertex_buffers &=
|
|
|
|
|
~u_bit_consecutive64(start_slot, count + unbind_num_trailing_slots);
|
2018-01-21 13:14:49 -08:00
|
|
|
|
2018-12-04 16:38:14 -08:00
|
|
|
for (unsigned i = 0; i < count; i++) {
|
|
|
|
|
const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
|
|
|
|
|
struct iris_vertex_buffer_state *state =
|
|
|
|
|
&genx->vertex_buffers[start_slot + i];
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-12-04 16:38:14 -08:00
|
|
|
if (!buffer) {
|
|
|
|
|
pipe_resource_reference(&state->resource, NULL);
|
|
|
|
|
continue;
|
2018-06-18 00:23:25 -07:00
|
|
|
}
|
|
|
|
|
|
2019-04-04 15:04:50 +03:00
|
|
|
/* We may see user buffers that are NULL bindings. */
|
|
|
|
|
assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2020-05-29 16:11:58 -07:00
|
|
|
if (buffer->buffer.resource &&
|
|
|
|
|
state->resource != buffer->buffer.resource)
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
|
|
|
|
|
|
2020-12-26 17:29:45 -05:00
|
|
|
if (take_ownership) {
|
|
|
|
|
pipe_resource_reference(&state->resource, NULL);
|
|
|
|
|
state->resource = buffer->buffer.resource;
|
|
|
|
|
} else {
|
|
|
|
|
pipe_resource_reference(&state->resource, buffer->buffer.resource);
|
|
|
|
|
}
|
2018-12-04 16:38:14 -08:00
|
|
|
struct iris_resource *res = (void *) state->resource;
|
2018-01-21 13:14:49 -08:00
|
|
|
|
2019-09-23 20:37:39 -07:00
|
|
|
state->offset = (int) buffer->buffer_offset;
|
|
|
|
|
|
2019-01-06 20:22:15 -08:00
|
|
|
if (res) {
|
|
|
|
|
ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
|
2018-11-21 00:38:49 -08:00
|
|
|
res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
|
2019-01-06 20:22:15 -08:00
|
|
|
}
|
2018-11-21 00:38:49 -08:00
|
|
|
|
2018-12-04 16:38:14 -08:00
|
|
|
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
|
2017-11-23 23:15:14 -08:00
|
|
|
vb.VertexBufferIndex = start_slot + i;
|
|
|
|
|
vb.AddressModifyEnable = true;
|
2018-12-04 16:38:14 -08:00
|
|
|
vb.BufferPitch = buffer->stride;
|
2018-11-02 14:54:35 -07:00
|
|
|
if (res) {
|
2021-02-08 16:39:42 -08:00
|
|
|
vb.BufferSize = res->base.b.width0 - (int) buffer->buffer_offset;
|
2018-11-02 14:54:35 -07:00
|
|
|
vb.BufferStartingAddress =
|
2021-07-19 21:23:18 -07:00
|
|
|
ro_bo(NULL, res->bo->address + (int) buffer->buffer_offset);
|
2020-10-07 07:44:56 -07:00
|
|
|
vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
|
|
|
|
|
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
|
2021-04-20 16:06:13 -07:00
|
|
|
#if GFX_VER >= 12
|
2021-04-12 10:11:40 -07:00
|
|
|
vb.L3BypassDisable = true;
|
|
|
|
|
#endif
|
2018-11-02 14:54:35 -07:00
|
|
|
} else {
|
|
|
|
|
vb.NullVertexBuffer = true;
|
2021-10-19 05:22:45 -07:00
|
|
|
vb.MOCS = iris_mocs(NULL, &screen->isl_dev,
|
|
|
|
|
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
|
2018-11-02 14:54:35 -07:00
|
|
|
}
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-12-21 03:01:34 -05:00
|
|
|
for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
|
|
|
|
|
struct iris_vertex_buffer_state *state =
|
|
|
|
|
&genx->vertex_buffers[start_slot + count + i];
|
|
|
|
|
|
|
|
|
|
pipe_resource_reference(&state->resource, NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-10 00:19:29 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Gallium CSO for vertex elements.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
struct iris_vertex_element_state {
|
|
|
|
|
uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
|
2018-06-07 01:45:47 -07:00
|
|
|
uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
|
2019-02-27 20:44:27 +01:00
|
|
|
uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
|
|
|
|
|
uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
|
2017-11-23 23:15:14 -08:00
|
|
|
unsigned count;
|
|
|
|
|
};
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->create_vertex_elements() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
|
2019-02-27 20:44:27 +01:00
|
|
|
* and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
|
|
|
|
|
* arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
|
|
|
|
|
* needed. In those cases, we need information that is only available at
* draw time. We set up edgeflag_ve and edgeflag_vfi as alternative versions
* of the last VERTEX_ELEMENT_STATE and 3DSTATE_VF_INSTANCING entries, to be
* used at draw time if we detect that the Vertex Shader needs EdgeFlag.
|
2018-07-30 23:49:34 -07:00
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void *
|
|
|
|
|
iris_create_vertex_elements(struct pipe_context *ctx,
|
|
|
|
|
unsigned count,
|
|
|
|
|
const struct pipe_vertex_element *state)
|
|
|
|
|
{
|
2018-10-07 20:31:09 -07:00
|
|
|
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
|
2021-04-05 13:19:39 -07:00
|
|
|
const struct intel_device_info *devinfo = &screen->devinfo;
|
2017-11-23 23:15:14 -08:00
|
|
|
struct iris_vertex_element_state *cso =
|
|
|
|
|
malloc(sizeof(struct iris_vertex_element_state));
|
|
|
|
|
|
2018-07-18 16:27:07 -07:00
|
|
|
cso->count = count;
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-04-26 23:42:10 -07:00
|
|
|
iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
|
2018-07-18 16:27:07 -07:00
|
|
|
ve.DWordLength =
|
|
|
|
|
1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
|
2018-04-26 23:42:10 -07:00
|
|
|
}
|
2017-11-23 23:15:14 -08:00
|
|
|
|
|
|
|
|
uint32_t *ve_pack_dest = &cso->vertex_elements[1];
|
2018-06-07 01:45:47 -07:00
|
|
|
uint32_t *vfi_pack_dest = cso->vf_instancing;
|
2017-11-23 23:15:14 -08:00
|
|
|
|
2018-06-26 13:32:19 -07:00
|
|
|
if (count == 0) {
|
|
|
|
|
iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
|
|
|
|
|
ve.Valid = true;
|
|
|
|
|
ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
|
|
|
|
|
ve.Component0Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component1Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component2Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component3Control = VFCOMP_STORE_1_FP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
for (int i = 0; i < count; i++) {
|
2018-10-07 20:31:09 -07:00
|
|
|
const struct iris_format_info fmt =
|
|
|
|
|
iris_format_for_usage(devinfo, state[i].src_format, 0);
|
2018-01-30 02:44:25 -08:00
|
|
|
unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
|
|
|
|
|
VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
|
|
|
|
|
|
2018-10-07 20:31:09 -07:00
|
|
|
switch (isl_format_get_num_channels(fmt.fmt)) {
|
2021-04-10 17:11:58 +02:00
|
|
|
case 0: comp[0] = VFCOMP_STORE_0; FALLTHROUGH;
|
|
|
|
|
case 1: comp[1] = VFCOMP_STORE_0; FALLTHROUGH;
|
|
|
|
|
case 2: comp[2] = VFCOMP_STORE_0; FALLTHROUGH;
|
2018-01-30 02:44:25 -08:00
|
|
|
case 3:
|
2018-10-07 20:31:09 -07:00
|
|
|
comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
|
|
|
|
|
: VFCOMP_STORE_1_FP;
|
2018-01-30 02:44:25 -08:00
|
|
|
break;
|
|
|
|
|
}
|
2017-11-23 23:15:14 -08:00
|
|
|
iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
|
2019-02-27 20:44:27 +01:00
|
|
|
ve.EdgeFlagEnable = false;
|
2017-11-23 23:15:14 -08:00
|
|
|
ve.VertexBufferIndex = state[i].vertex_buffer_index;
|
|
|
|
|
ve.Valid = true;
|
|
|
|
|
ve.SourceElementOffset = state[i].src_offset;
|
2018-10-07 20:31:09 -07:00
|
|
|
ve.SourceElementFormat = fmt.fmt;
|
2018-01-30 02:44:25 -08:00
|
|
|
ve.Component0Control = comp[0];
|
|
|
|
|
ve.Component1Control = comp[1];
|
|
|
|
|
ve.Component2Control = comp[2];
|
|
|
|
|
ve.Component3Control = comp[3];
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-06-07 01:45:47 -07:00
|
|
|
iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
|
2017-11-23 23:15:14 -08:00
|
|
|
vi.VertexElementIndex = i;
|
|
|
|
|
vi.InstancingEnable = state[i].instance_divisor > 0;
|
|
|
|
|
vi.InstanceDataStepRate = state[i].instance_divisor;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
|
2018-06-07 01:45:47 -07:00
|
|
|
vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2019-02-27 20:44:27 +01:00
|
|
|
/* An alternative version of the last VE and VFI is stored so it
|
|
|
|
|
* can be used at draw time in case the Vertex Shader uses EdgeFlag.
|
|
|
|
|
*/
|
|
|
|
|
if (count) {
|
|
|
|
|
const unsigned edgeflag_index = count - 1;
|
|
|
|
|
const struct iris_format_info fmt =
|
|
|
|
|
iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
|
|
|
|
|
iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
|
|
|
|
|
ve.EdgeFlagEnable = true;
|
|
|
|
|
ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
|
|
|
|
|
ve.Valid = true;
|
|
|
|
|
ve.SourceElementOffset = state[edgeflag_index].src_offset;
|
|
|
|
|
ve.SourceElementFormat = fmt.fmt;
|
|
|
|
|
ve.Component0Control = VFCOMP_STORE_SRC;
|
|
|
|
|
ve.Component1Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component2Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component3Control = VFCOMP_STORE_0;
|
|
|
|
|
}
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
|
|
|
|
|
/* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
|
|
|
|
|
* at draw time, as it should change if SGVs are emitted.
|
|
|
|
|
*/
|
|
|
|
|
vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
|
|
|
|
|
vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-23 23:15:14 -08:00
|
|
|
return cso;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->bind_vertex_elements_state() driver hook.
|
|
|
|
|
*/
|
2018-01-09 21:29:09 -08:00
|
|
|
static void
|
|
|
|
|
iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
|
|
|
|
|
{
|
|
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-07-18 09:23:24 -07:00
|
|
|
struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
|
|
|
|
|
struct iris_vertex_element_state *new_cso = state;
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
|
|
|
|
|
* we need to re-emit it to ensure we're overriding the right one.
|
|
|
|
|
*/
|
2018-07-18 09:23:24 -07:00
|
|
|
if (new_cso && cso_changed(count))
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
|
2018-01-09 21:29:09 -08:00
|
|
|
|
|
|
|
|
ice->state.cso_vertex_elements = state;
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->create_stream_output_target() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* "Target" here refers to a destination buffer. We translate this into
|
|
|
|
|
* a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
|
|
|
|
|
* know which buffer this represents, or whether we ought to zero the
|
|
|
|
|
* write-offsets, or append. Those are handled in the set() hook.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static struct pipe_stream_output_target *
|
|
|
|
|
iris_create_stream_output_target(struct pipe_context *ctx,
|
2018-11-21 00:38:49 -08:00
|
|
|
struct pipe_resource *p_res,
|
2017-11-23 23:15:14 -08:00
|
|
|
unsigned buffer_offset,
|
|
|
|
|
unsigned buffer_size)
|
|
|
|
|
{
|
2018-11-21 00:38:49 -08:00
|
|
|
struct iris_resource *res = (void *) p_res;
|
2018-06-29 12:58:31 -07:00
|
|
|
struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
|
|
|
|
|
if (!cso)
|
2017-11-23 23:15:14 -08:00
|
|
|
return NULL;
|
|
|
|
|
|
2018-11-21 00:38:49 -08:00
|
|
|
res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
|
|
|
|
|
|
2018-06-29 12:58:31 -07:00
|
|
|
pipe_reference_init(&cso->base.reference, 1);
|
2018-11-21 00:38:49 -08:00
|
|
|
pipe_resource_reference(&cso->base.buffer, p_res);
|
2018-06-29 12:58:31 -07:00
|
|
|
cso->base.buffer_offset = buffer_offset;
|
|
|
|
|
cso->base.buffer_size = buffer_size;
|
|
|
|
|
cso->base.context = ctx;
|
|
|
|
|
|
2021-02-08 16:39:42 -08:00
|
|
|
util_range_add(&res->base.b, &res->valid_buffer_range, buffer_offset,
|
2019-04-05 11:54:10 -07:00
|
|
|
buffer_offset + buffer_size);
|
|
|
|
|
|
2018-06-29 12:58:31 -07:00
|
|
|
return &cso->base;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
iris_stream_output_target_destroy(struct pipe_context *ctx,
|
2018-06-29 12:58:31 -07:00
|
|
|
struct pipe_stream_output_target *state)
|
2017-11-23 23:15:14 -08:00
|
|
|
{
|
2018-06-29 12:58:31 -07:00
|
|
|
struct iris_stream_output_target *cso = (void *) state;
|
|
|
|
|
|
|
|
|
|
pipe_resource_reference(&cso->base.buffer, NULL);
|
|
|
|
|
pipe_resource_reference(&cso->offset.res, NULL);
|
|
|
|
|
|
|
|
|
|
free(cso);
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* The pipe->set_stream_output_targets() driver hook.
|
|
|
|
|
*
|
|
|
|
|
* At this point, we know which targets are bound to a particular index,
|
|
|
|
|
* and also whether we want to append or start over. We can finish the
|
|
|
|
|
* 3DSTATE_SO_BUFFER packets we started earlier.
|
|
|
|
|
*/
|
2017-11-23 23:15:14 -08:00
|
|
|
static void
|
|
|
|
|
iris_set_stream_output_targets(struct pipe_context *ctx,
|
|
|
|
|
unsigned num_targets,
|
|
|
|
|
struct pipe_stream_output_target **targets,
|
|
|
|
|
const unsigned *offsets)
|
|
|
|
|
{
|
2018-06-29 12:58:31 -07:00
|
|
|
struct iris_context *ice = (struct iris_context *) ctx;
|
2018-07-11 12:45:19 -07:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
|
|
|
|
uint32_t *so_buffers = genx->so_buffers;
|
2019-11-05 15:08:01 -08:00
|
|
|
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
|
2018-07-11 12:45:19 -07:00
|
|
|
|
|
|
|
|
const bool active = num_targets > 0;
|
|
|
|
|
if (ice->state.streamout_active != active) {
|
|
|
|
|
ice->state.streamout_active = active;
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
|
2018-10-24 14:45:00 -07:00
|
|
|
|
|
|
|
|
/* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
|
|
|
|
|
* it's a non-pipelined command. If we're switching streamout on, we
|
|
|
|
|
* may have missed emitting it earlier, so do so now. (We're already
|
|
|
|
|
* taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
|
|
|
|
|
*/
|
2019-04-25 11:19:46 +03:00
|
|
|
if (active) {
|
2018-10-24 14:45:00 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
|
2019-04-25 11:19:46 +03:00
|
|
|
} else {
|
|
|
|
|
uint32_t flush = 0;
|
|
|
|
|
for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
|
|
|
|
|
struct iris_stream_output_target *tgt =
|
|
|
|
|
(void *) ice->state.so_target[i];
|
|
|
|
|
if (tgt) {
|
|
|
|
|
struct iris_resource *res = (void *) tgt->base.buffer;
|
|
|
|
|
|
2020-10-19 22:33:05 -05:00
|
|
|
flush |= iris_flush_bits_for_history(ice, res);
|
2019-04-25 11:19:46 +03:00
|
|
|
iris_dirty_for_history(ice, res);
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-04-20 16:06:13 -07:00
|
|
|
#if GFX_VER >= 12
|
2021-04-12 10:11:40 -07:00
|
|
|
/* SO draws require flushing of const cache to make SO data
|
|
|
|
|
* observable when VB/IB are cached in L3.
|
|
|
|
|
*/
|
|
|
|
|
if (flush & PIPE_CONTROL_VF_CACHE_INVALIDATE)
|
|
|
|
|
flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
|
|
|
|
|
#endif
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
|
|
|
|
|
"make streamout results visible", flush);
|
2019-04-25 11:19:46 +03:00
|
|
|
}
|
2018-07-11 12:45:19 -07:00
|
|
|
}
|
|
|
|
|
|
2018-07-11 17:05:10 -07:00
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
pipe_so_target_reference(&ice->state.so_target[i],
|
|
|
|
|
i < num_targets ? targets[i] : NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-11 12:45:19 -07:00
|
|
|
/* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
|
|
|
|
|
if (!active)
|
|
|
|
|
return;
|
2018-06-29 12:58:31 -07:00
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < 4; i++,
|
|
|
|
|
so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
|
|
|
|
|
|
2019-04-27 00:24:05 -07:00
|
|
|
struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
|
|
|
|
|
unsigned offset = offsets[i];
|
|
|
|
|
|
|
|
|
|
if (!tgt) {
|
2019-10-23 23:45:58 +01:00
|
|
|
iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER < 12
|
2018-06-29 12:58:31 -07:00
|
|
|
sob.SOBufferIndex = i;
|
2019-10-23 23:45:58 +01:00
|
|
|
#else
|
|
|
|
|
sob._3DCommandOpcode = 0;
|
|
|
|
|
sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
|
|
|
|
|
#endif
|
2021-10-19 05:20:00 -07:00
|
|
|
sob.MOCS = iris_mocs(NULL, &screen->isl_dev, 0);
|
2019-10-23 23:45:58 +01:00
|
|
|
}
|
2018-06-29 12:58:31 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-01 05:12:30 -08:00
|
|
|
if (!tgt->offset.res)
|
2020-08-25 15:27:07 -07:00
|
|
|
upload_state(ctx->const_uploader, &tgt->offset, sizeof(uint32_t), 4);
|
2021-02-01 05:12:30 -08:00
|
|
|
|
2018-12-05 00:55:45 -08:00
|
|
|
struct iris_resource *res = (void *) tgt->base.buffer;
|
2018-09-12 22:23:50 -07:00
|
|
|
|
2018-06-29 12:58:31 -07:00
|
|
|
/* Note that offsets[i] will either be 0, causing us to zero
|
|
|
|
|
* the value in the buffer, or 0xFFFFFFFF, which happens to mean
|
|
|
|
|
* "continue appending at the existing offset."
|
|
|
|
|
*/
|
2019-04-27 00:24:05 -07:00
|
|
|
assert(offset == 0 || offset == 0xFFFFFFFF);
|
|
|
|
|
|
2021-02-02 17:02:05 -08:00
|
|
|
/* When we're first called with an offset of 0, we want the next
|
|
|
|
|
* 3DSTATE_SO_BUFFER packets to reset the offset to the beginning.
|
|
|
|
|
* Any further times we emit those packets, we want to use 0xFFFFFFFF
|
|
|
|
|
* to continue appending from the current offset.
|
|
|
|
|
*
|
|
|
|
|
* Note that we might be called by Begin (offset = 0), Pause, then
|
|
|
|
|
* Resume (offset = 0xFFFFFFFF) before ever drawing (where these
|
|
|
|
|
* commands will actually be sent to the GPU). In this case, we
|
|
|
|
|
* don't want to append - we still want to do our initial zeroing.
|
2019-04-27 00:24:05 -07:00
|
|
|
*/
|
2021-02-02 17:02:05 -08:00
|
|
|
if (offset == 0)
|
|
|
|
|
tgt->zero_offset = true;
|
2018-06-29 12:58:31 -07:00
|
|
|
|
2018-12-05 00:55:45 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER < 12
|
2019-10-23 23:45:58 +01:00
|
|
|
sob.SOBufferIndex = i;
|
|
|
|
|
#else
|
|
|
|
|
sob._3DCommandOpcode = 0;
|
|
|
|
|
sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
|
|
|
|
|
#endif
|
2018-12-05 00:55:45 -08:00
|
|
|
sob.SurfaceBaseAddress =
|
2021-07-19 21:23:18 -07:00
|
|
|
rw_bo(NULL, res->bo->address + tgt->base.buffer_offset,
|
2020-05-29 16:38:43 -07:00
|
|
|
IRIS_DOMAIN_OTHER_WRITE);
|
2018-12-05 00:55:45 -08:00
|
|
|
sob.SOBufferEnable = true;
|
|
|
|
|
sob.StreamOffsetWriteEnable = true;
|
|
|
|
|
sob.StreamOutputBufferOffsetAddressEnable = true;
|
2020-10-07 07:44:56 -07:00
|
|
|
sob.MOCS = iris_mocs(res->bo, &screen->isl_dev, 0);
|
2018-12-05 00:55:45 -08:00
|
|
|
|
|
|
|
|
sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
|
|
|
|
|
sob.StreamOutputBufferOffsetAddress =
|
2021-07-19 21:23:18 -07:00
|
|
|
rw_bo(NULL, iris_resource_bo(tgt->offset.res)->address +
|
2020-05-29 16:38:43 -07:00
|
|
|
tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
|
2021-02-02 17:02:05 -08:00
|
|
|
sob.StreamOffset = 0xFFFFFFFF; /* not offset, see above */
|
2018-06-29 12:58:31 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
|
2017-11-23 23:15:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
|
|
|
|
|
* 3DSTATE_STREAMOUT packets.
|
|
|
|
|
*
|
|
|
|
|
* 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
|
|
|
|
|
* hardware to record. We can create it entirely based on the shader, with
|
|
|
|
|
* no dynamic state dependencies.
|
|
|
|
|
*
|
|
|
|
|
* 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
|
|
|
|
|
* state-based settings. We capture the shader-related ones here, and merge
|
|
|
|
|
* the rest in at draw time.
|
|
|
|
|
*/
|
2018-06-29 12:58:31 -07:00
|
|
|
static uint32_t *
|
|
|
|
|
iris_create_so_decl_list(const struct pipe_stream_output_info *info,
|
|
|
|
|
const struct brw_vue_map *vue_map)
|
|
|
|
|
{
|
|
|
|
|
struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
|
|
|
|
|
int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
|
|
|
|
|
int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
|
|
|
|
|
int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
|
|
|
|
|
int max_decls = 0;
|
|
|
|
|
STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
|
|
|
|
|
|
|
|
|
|
memset(so_decl, 0, sizeof(so_decl));
|
|
|
|
|
|
|
|
|
|
/* Construct the list of SO_DECLs to be emitted. The formatting of the
|
|
|
|
|
* command feels strange -- each dword pair contains a SO_DECL per stream.
|
|
|
|
|
*/
|
|
|
|
|
for (unsigned i = 0; i < info->num_outputs; i++) {
|
|
|
|
|
const struct pipe_stream_output *output = &info->output[i];
|
|
|
|
|
const int buffer = output->output_buffer;
|
|
|
|
|
const int varying = output->register_index;
|
|
|
|
|
const unsigned stream_id = output->stream;
|
|
|
|
|
assert(stream_id < MAX_VERTEX_STREAMS);
|
|
|
|
|
|
|
|
|
|
buffer_mask[stream_id] |= 1 << buffer;
|
|
|
|
|
|
|
|
|
|
assert(vue_map->varying_to_slot[varying] >= 0);
|
|
|
|
|
|
|
|
|
|
/* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
|
|
|
|
|
* array. Instead, it simply increments DstOffset for the following
|
|
|
|
|
* input by the number of components that should be skipped.
|
|
|
|
|
*
|
|
|
|
|
* Our hardware is unusual in that it requires us to program SO_DECLs
|
|
|
|
|
* for fake "hole" components, rather than simply taking the offset
|
|
|
|
|
* for each real varying. Each hole can have size 1, 2, 3, or 4; we
|
|
|
|
|
* program as many size = 4 holes as we can, then a final hole to
|
|
|
|
|
* accommodate the final 1, 2, or 3 remaining.
|
|
|
|
|
*/
|
|
|
|
|
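/* For example, a 7-component gap would be emitted as one size-4 hole
 * followed by one size-3 hole.
 */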
int skip_components = output->dst_offset - next_offset[buffer];
|
|
|
|
|
|
|
|
|
|
while (skip_components > 0) {
|
|
|
|
|
so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
|
|
|
|
|
.HoleFlag = 1,
|
|
|
|
|
.OutputBufferSlot = output->output_buffer,
|
|
|
|
|
.ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
|
|
|
|
|
};
|
|
|
|
|
skip_components -= 4;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
next_offset[buffer] = output->dst_offset + output->num_components;
|
|
|
|
|
|
|
|
|
|
so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
|
|
|
|
|
.OutputBufferSlot = output->output_buffer,
|
|
|
|
|
.RegisterIndex = vue_map->varying_to_slot[varying],
|
|
|
|
|
.ComponentMask =
|
|
|
|
|
((1 << output->num_components) - 1) << output->start_component,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if (decls[stream_id] > max_decls)
|
|
|
|
|
max_decls = decls[stream_id];
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-11 12:45:19 -07:00
|
|
|
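/* 3DSTATE_SO_DECL_LIST is 3 header DWords plus one 2-DWord SO_DECL_ENTRY
 * per decl, hence the "3 + 2 * max_decls" below.
 */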
unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
|
|
|
|
|
uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
|
|
|
|
|
uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
|
|
|
|
|
int urb_entry_read_offset = 0;
|
|
|
|
|
int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
|
|
|
|
|
urb_entry_read_offset;
|
2018-06-29 12:58:31 -07:00
|
|
|
|
2018-07-11 12:45:19 -07:00
|
|
|
/* We always read the whole vertex. This could be reduced at some
|
|
|
|
|
* point by reading less and offsetting the register index in the
|
|
|
|
|
* SO_DECLs.
|
|
|
|
|
*/
|
|
|
|
|
sol.Stream0VertexReadOffset = urb_entry_read_offset;
|
|
|
|
|
sol.Stream0VertexReadLength = urb_entry_read_length - 1;
|
|
|
|
|
sol.Stream1VertexReadOffset = urb_entry_read_offset;
|
|
|
|
|
sol.Stream1VertexReadLength = urb_entry_read_length - 1;
|
|
|
|
|
sol.Stream2VertexReadOffset = urb_entry_read_offset;
|
|
|
|
|
sol.Stream2VertexReadLength = urb_entry_read_length - 1;
|
|
|
|
|
sol.Stream3VertexReadOffset = urb_entry_read_offset;
|
|
|
|
|
sol.Stream3VertexReadLength = urb_entry_read_length - 1;
|
|
|
|
|
|
|
|
|
|
/* Set buffer pitches; 0 means unbound. */
|
|
|
|
|
sol.Buffer0SurfacePitch = 4 * info->stride[0];
|
|
|
|
|
sol.Buffer1SurfacePitch = 4 * info->stride[1];
|
|
|
|
|
sol.Buffer2SurfacePitch = 4 * info->stride[2];
|
|
|
|
|
sol.Buffer3SurfacePitch = 4 * info->stride[3];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
|
2018-06-29 12:58:31 -07:00
|
|
|
list.DWordLength = 3 + 2 * max_decls - 2;
|
|
|
|
|
list.StreamtoBufferSelects0 = buffer_mask[0];
|
|
|
|
|
list.StreamtoBufferSelects1 = buffer_mask[1];
|
|
|
|
|
list.StreamtoBufferSelects2 = buffer_mask[2];
|
|
|
|
|
list.StreamtoBufferSelects3 = buffer_mask[3];
|
|
|
|
|
list.NumEntries0 = decls[0];
|
|
|
|
|
list.NumEntries1 = decls[1];
|
|
|
|
|
list.NumEntries2 = decls[2];
|
|
|
|
|
list.NumEntries3 = decls[3];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < max_decls; i++) {
|
2018-07-11 16:26:06 -07:00
|
|
|
iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
|
2018-06-29 12:58:31 -07:00
|
|
|
entry.Stream0Decl = so_decl[0][i];
|
|
|
|
|
entry.Stream1Decl = so_decl[1][i];
|
|
|
|
|
entry.Stream2Decl = so_decl[2][i];
|
|
|
|
|
entry.Stream3Decl = so_decl[3][i];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-11 12:45:19 -07:00
|
|
|
return map;
|
2018-06-29 12:58:31 -07:00
|
|
|
}
|
|
|
|
|
|
2018-01-29 15:06:04 -08:00
|
|
|
static void
|
2018-04-19 19:04:17 -07:00
|
|
|
iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
|
|
|
|
|
const struct brw_vue_map *last_vue_map,
|
|
|
|
|
bool two_sided_color,
|
|
|
|
|
unsigned *out_offset,
|
|
|
|
|
unsigned *out_length)
|
|
|
|
|
{
|
|
|
|
|
/* The compiler computes the first URB slot without considering COL/BFC
|
|
|
|
|
* swizzling (because it doesn't know whether it's enabled), so we need
|
|
|
|
|
* to do that here too. This may result in a smaller offset, which
|
|
|
|
|
* should be safe.
|
|
|
|
|
*/
|
|
|
|
|
const unsigned first_slot =
|
|
|
|
|
brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
|
|
|
|
|
|
|
|
|
|
/* This becomes the URB read offset (counted in pairs of slots). */
|
|
|
|
|
assert(first_slot % 2 == 0);
|
|
|
|
|
*out_offset = first_slot / 2;
|
|
|
|
|
|
|
|
|
|
/* We need to adjust the inputs read to account for front/back color
|
|
|
|
|
* swizzling, as it can make the URB length longer.
|
|
|
|
|
*/
|
|
|
|
|
for (int c = 0; c <= 1; c++) {
|
|
|
|
|
if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
|
|
|
|
|
/* If two sided color is enabled, the fragment shader's gl_Color
|
|
|
|
|
* (COL0) input comes from either the gl_FrontColor (COL0) or
|
|
|
|
|
* gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
|
|
|
|
|
*/
|
|
|
|
|
if (two_sided_color)
|
|
|
|
|
fs_input_slots |= (VARYING_BIT_BFC0 << c);
|
|
|
|
|
|
|
|
|
|
/* If front color isn't written, we opt to give them back color
|
|
|
|
|
* instead of an undefined value. Switch from COL to BFC.
|
|
|
|
|
*/
|
|
|
|
|
if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
|
|
|
|
|
fs_input_slots &= ~(VARYING_BIT_COL0 << c);
|
|
|
|
|
fs_input_slots |= (VARYING_BIT_BFC0 << c);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Compute the minimum URB Read Length necessary for the FS inputs.
|
|
|
|
|
*
|
|
|
|
|
* From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
|
|
|
|
|
* 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
|
|
|
|
|
*
|
|
|
|
|
* "This field should be set to the minimum length required to read the
|
|
|
|
|
* maximum source attribute. The maximum source attribute is indicated
|
|
|
|
|
* by the maximum value of the enabled Attribute # Source Attribute if
|
|
|
|
|
* Attribute Swizzle Enable is set, Number of Output Attributes-1 if
|
|
|
|
|
* enable is not set.
|
|
|
|
|
* read_length = ceiling((max_source_attr + 1) / 2)
|
|
|
|
|
*
|
|
|
|
|
* [errata] Corruption/Hang possible if length programmed larger than
|
|
|
|
|
* recommended"
|
|
|
|
|
*
|
|
|
|
|
* Similar text exists for Ivy Bridge.
|
|
|
|
|
*
|
|
|
|
|
* We find the last URB slot that's actually read by the FS.
|
|
|
|
|
*/
|
|
|
|
|
unsigned last_read_slot = last_vue_map->num_slots - 1;
|
|
|
|
|
while (last_read_slot > first_slot && !(fs_input_slots &
|
|
|
|
|
(1ull << last_vue_map->slot_to_varying[last_read_slot])))
|
|
|
|
|
--last_read_slot;
|
|
|
|
|
|
|
|
|
|
/* The URB read length is the difference of the two, counted in pairs. */
|
|
|
|
|
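/* For example (hypothetical values), first_slot = 2 and last_read_slot = 7
 * span 6 slots, giving a read length of DIV_ROUND_UP(6, 2) = 3 pairs.
 */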
*out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-20 15:11:09 -07:00
|
|
|
static void
|
|
|
|
|
iris_emit_sbe_swiz(struct iris_batch *batch,
|
|
|
|
|
const struct iris_context *ice,
|
2021-02-18 14:27:19 -08:00
|
|
|
const struct brw_vue_map *vue_map,
|
2018-07-23 15:29:00 -07:00
|
|
|
unsigned urb_read_offset,
|
|
|
|
|
unsigned sprite_coord_enables)
|
2018-06-20 15:11:09 -07:00
|
|
|
{
|
|
|
|
|
struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
|
|
|
|
|
const struct brw_wm_prog_data *wm_prog_data = (void *)
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
|
|
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
|
|
|
|
|
/* XXX: this should be generated when putting programs in place */
|
|
|
|
|
|
2019-11-02 08:06:03 +01:00
|
|
|
for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
|
|
|
|
|
const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
|
2018-06-20 15:11:09 -07:00
|
|
|
const int input_index = wm_prog_data->urb_setup[fs_attr];
|
|
|
|
|
if (input_index < 0 || input_index >= 16)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
|
|
|
|
|
&attr_overrides[input_index];
|
2018-08-25 00:11:24 -07:00
|
|
|
int slot = vue_map->varying_to_slot[fs_attr];
|
2018-06-20 15:11:09 -07:00
|
|
|
|
|
|
|
|
/* Viewport and Layer are stored in the VUE header. We need to override
|
|
|
|
|
* them to zero if earlier stages didn't write them, as GL requires that
|
|
|
|
|
* they read back as zero when not explicitly set.
|
|
|
|
|
*/
|
|
|
|
|
switch (fs_attr) {
|
|
|
|
|
case VARYING_SLOT_VIEWPORT:
|
|
|
|
|
case VARYING_SLOT_LAYER:
|
|
|
|
|
attr->ComponentOverrideX = true;
|
|
|
|
|
attr->ComponentOverrideW = true;
|
|
|
|
|
attr->ConstantSource = CONST_0000;
|
|
|
|
|
|
|
|
|
|
if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
|
|
|
|
|
attr->ComponentOverrideY = true;
|
|
|
|
|
if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
|
|
|
|
|
attr->ComponentOverrideZ = true;
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-23 15:29:00 -07:00
|
|
|
if (sprite_coord_enables & (1 << input_index))
|
|
|
|
|
continue;
|
|
|
|
|
|
2018-06-20 15:11:09 -07:00
|
|
|
/* If there was only a back color written but not front, use back
|
|
|
|
|
* as the color instead of undefined.
|
|
|
|
|
*/
|
|
|
|
|
if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
|
|
|
|
|
slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
|
|
|
|
|
if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
|
|
|
|
|
slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
|
|
|
|
|
|
|
|
|
|
/* Not written by the previous stage - undefined. */
|
|
|
|
|
if (slot == -1) {
|
|
|
|
|
attr->ComponentOverrideX = true;
|
|
|
|
|
attr->ComponentOverrideY = true;
|
|
|
|
|
attr->ComponentOverrideZ = true;
|
|
|
|
|
attr->ComponentOverrideW = true;
|
|
|
|
|
attr->ConstantSource = CONST_0001_FLOAT;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Compute the location of the attribute relative to the read offset,
|
|
|
|
|
* which is counted in 256-bit increments (two 128-bit VUE slots).
|
|
|
|
|
*/
|
|
|
|
|
const int source_attr = slot - 2 * urb_read_offset;
|
|
|
|
|
assert(source_attr >= 0 && source_attr <= 32);
|
|
|
|
|
attr->SourceAttribute = source_attr;
|
|
|
|
|
|
|
|
|
|
/* If we are doing two-sided color, and the VUE slot following this one
|
|
|
|
|
* represents a back-facing color, then we need to instruct the SF unit
|
|
|
|
|
* to do back-facing swizzling.
|
|
|
|
|
*/
|
|
|
|
|
if (cso_rast->light_twoside &&
|
|
|
|
|
((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
|
|
|
|
|
vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
|
|
|
|
|
(vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
|
|
|
|
|
vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
|
|
|
|
|
attr->SwizzleSelect = INPUTATTR_FACING;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
|
|
|
|
|
for (int i = 0; i < 16; i++)
|
|
|
|
|
sbes.Attribute[i] = attr_overrides[i];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-15 17:21:14 +03:00
|
|
|
static bool
|
|
|
|
|
iris_is_drawing_points(const struct iris_context *ice)
|
|
|
|
|
{
|
|
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
|
|
|
|
|
if (cso_rast->fill_mode_point) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ice->shaders.prog[MESA_SHADER_GEOMETRY]) {
|
|
|
|
|
const struct brw_gs_prog_data *gs_prog_data =
|
|
|
|
|
(void *) ice->shaders.prog[MESA_SHADER_GEOMETRY]->prog_data;
|
|
|
|
|
return gs_prog_data->output_topology == _3DPRIM_POINTLIST;
|
|
|
|
|
} else if (ice->shaders.prog[MESA_SHADER_TESS_EVAL]) {
|
|
|
|
|
const struct brw_tes_prog_data *tes_data =
|
|
|
|
|
(void *) ice->shaders.prog[MESA_SHADER_TESS_EVAL]->prog_data;
|
|
|
|
|
return tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
|
|
|
|
|
} else {
|
|
|
|
|
return ice->state.prim_mode == PIPE_PRIM_POINTS;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-23 15:29:00 -07:00
|
|
|
static unsigned
|
|
|
|
|
iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
|
|
|
|
|
const struct iris_rasterizer_state *cso)
|
|
|
|
|
{
|
|
|
|
|
unsigned overrides = 0;
|
|
|
|
|
|
|
|
|
|
if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
|
|
|
|
|
overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < 8; i++) {
|
|
|
|
|
if ((cso->sprite_coord_enable & (1 << i)) &&
|
|
|
|
|
prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
|
|
|
|
|
overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return overrides;
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-19 19:04:17 -07:00
|
|
|
static void
|
|
|
|
|
iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
|
2018-01-29 15:06:04 -08:00
|
|
|
{
|
2018-04-19 19:04:17 -07:00
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
const struct brw_wm_prog_data *wm_prog_data = (void *)
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
|
2021-02-18 14:27:19 -08:00
|
|
|
const struct brw_vue_map *last_vue_map =
|
|
|
|
|
&brw_vue_prog_data(ice->shaders.last_vue_shader->prog_data)->vue_map;
|
2018-01-29 15:06:04 -08:00
|
|
|
|
|
|
|
|
unsigned urb_read_offset, urb_read_length;
|
2021-12-15 00:53:49 -08:00
|
|
|
iris_compute_sbe_urb_read_interval(wm_prog_data->inputs,
|
2021-02-18 14:27:19 -08:00
|
|
|
last_vue_map,
|
2018-04-19 19:04:17 -07:00
|
|
|
cso_rast->light_twoside,
|
|
|
|
|
&urb_read_offset, &urb_read_length);
|
2018-01-29 15:06:04 -08:00
|
|
|
|
2018-07-23 15:29:00 -07:00
|
|
|
unsigned sprite_coord_overrides =
|
2020-10-15 17:21:14 +03:00
|
|
|
iris_is_drawing_points(ice) ?
|
|
|
|
|
iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast) : 0;
|
2018-07-23 15:29:00 -07:00
|
|
|
|
2018-04-19 19:04:17 -07:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
|
2018-01-29 15:06:04 -08:00
|
|
|
sbe.AttributeSwizzleEnable = true;
|
|
|
|
|
sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
|
|
|
|
|
sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
|
|
|
|
|
sbe.VertexURBEntryReadOffset = urb_read_offset;
|
|
|
|
|
sbe.VertexURBEntryReadLength = urb_read_length;
|
|
|
|
|
sbe.ForceVertexURBEntryReadOffset = true;
|
|
|
|
|
sbe.ForceVertexURBEntryReadLength = true;
|
|
|
|
|
sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
|
2018-07-23 15:29:00 -07:00
|
|
|
sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9
|
2018-04-19 19:04:17 -07:00
|
|
|
for (int i = 0; i < 32; i++) {
|
2018-01-29 15:06:04 -08:00
|
|
|
sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
|
|
|
|
|
}
|
2018-11-07 14:23:27 +10:00
|
|
|
#endif
|
2021-12-14 15:25:26 -08:00
|
|
|
|
|
|
|
|
/* Ask the hardware to supply PrimitiveID if the fragment shader
|
|
|
|
|
* reads it but a previous stage didn't write one.
|
|
|
|
|
*/
|
|
|
|
|
if ((wm_prog_data->inputs & VARYING_BIT_PRIMITIVE_ID) &&
|
|
|
|
|
last_vue_map->varying_to_slot[VARYING_SLOT_PRIMITIVE_ID] == -1) {
|
|
|
|
|
sbe.PrimitiveIDOverrideAttributeSelect =
|
|
|
|
|
wm_prog_data->urb_setup[VARYING_SLOT_PRIMITIVE_ID];
|
|
|
|
|
sbe.PrimitiveIDOverrideComponentX = true;
|
|
|
|
|
sbe.PrimitiveIDOverrideComponentY = true;
|
|
|
|
|
sbe.PrimitiveIDOverrideComponentZ = true;
|
|
|
|
|
sbe.PrimitiveIDOverrideComponentW = true;
|
|
|
|
|
}
|
2018-01-29 15:06:04 -08:00
|
|
|
}
|
2018-06-20 15:11:09 -07:00
|
|
|
|
2021-02-18 14:27:19 -08:00
|
|
|
iris_emit_sbe_swiz(batch, ice, last_vue_map, urb_read_offset,
|
|
|
|
|
sprite_coord_overrides);
|
2018-01-29 15:06:04 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* ------------------------------------------------------------------- */
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Populate VS program key fields based on the current state.
|
|
|
|
|
*/
|
2018-01-25 02:03:18 -08:00
|
|
|
static void
|
|
|
|
|
iris_populate_vs_key(const struct iris_context *ice,
|
2018-10-26 22:18:56 -07:00
|
|
|
const struct shader_info *info,
|
2019-06-27 15:06:30 +10:00
|
|
|
gl_shader_stage last_stage,
|
2019-12-08 20:25:42 -08:00
|
|
|
struct iris_vs_prog_key *key)
|
2018-01-25 02:03:18 -08:00
|
|
|
{
|
2018-10-26 22:18:56 -07:00
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
|
2018-11-09 17:35:29 -08:00
|
|
|
if (info->clip_distance_array_size == 0 &&
|
2019-06-27 15:06:30 +10:00
|
|
|
(info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
|
|
|
|
|
last_stage == MESA_SHADER_VERTEX)
|
2019-12-08 20:25:42 -08:00
|
|
|
key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
|
2018-01-25 02:03:18 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Populate TCS program key fields based on the current state.
|
|
|
|
|
*/
|
2018-01-25 02:03:18 -08:00
|
|
|
static void
|
|
|
|
|
iris_populate_tcs_key(const struct iris_context *ice,
|
2019-12-08 20:25:42 -08:00
|
|
|
struct iris_tcs_prog_key *key)
|
2018-01-25 02:03:18 -08:00
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Populate TES program key fields based on the current state.
|
|
|
|
|
*/
|
2018-01-25 02:03:18 -08:00
|
|
|
static void
|
|
|
|
|
iris_populate_tes_key(const struct iris_context *ice,
|
2019-06-28 22:25:57 +10:00
|
|
|
const struct shader_info *info,
|
|
|
|
|
gl_shader_stage last_stage,
|
2019-12-08 20:25:42 -08:00
|
|
|
struct iris_tes_prog_key *key)
|
2018-01-25 02:03:18 -08:00
|
|
|
{
|
2019-06-28 22:25:57 +10:00
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
|
|
|
|
|
if (info->clip_distance_array_size == 0 &&
|
|
|
|
|
(info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
|
|
|
|
|
last_stage == MESA_SHADER_TESS_EVAL)
|
2019-12-08 20:25:42 -08:00
|
|
|
key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
|
2018-01-25 02:03:18 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Populate GS program key fields based on the current state.
|
|
|
|
|
*/
|
2018-01-25 02:03:18 -08:00
|
|
|
static void
|
|
|
|
|
iris_populate_gs_key(const struct iris_context *ice,
|
2019-06-27 15:06:30 +10:00
|
|
|
const struct shader_info *info,
|
|
|
|
|
gl_shader_stage last_stage,
|
2019-12-08 20:25:42 -08:00
|
|
|
struct iris_gs_prog_key *key)
|
2018-01-25 02:03:18 -08:00
|
|
|
{
|
2019-06-27 15:06:30 +10:00
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
|
|
|
|
|
if (info->clip_distance_array_size == 0 &&
|
|
|
|
|
(info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
|
|
|
|
|
last_stage == MESA_SHADER_GEOMETRY)
|
2019-12-08 20:25:42 -08:00
|
|
|
key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
|
2018-01-25 02:03:18 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Populate FS program key fields based on the current state.
|
|
|
|
|
*/
|
2018-01-25 02:03:18 -08:00
|
|
|
static void
|
|
|
|
|
iris_populate_fs_key(const struct iris_context *ice,
|
2019-07-07 16:32:09 -07:00
|
|
|
const struct shader_info *info,
|
2019-12-08 20:25:42 -08:00
|
|
|
struct iris_fs_prog_key *key)
|
2018-01-25 02:03:18 -08:00
|
|
|
{
|
2019-04-18 22:29:27 -07:00
|
|
|
struct iris_screen *screen = (void *) ice->ctx.screen;
|
2018-01-29 15:06:08 -08:00
|
|
|
const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
|
|
|
|
|
const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
|
|
|
|
|
const struct iris_rasterizer_state *rast = ice->state.cso_rast;
|
|
|
|
|
const struct iris_blend_state *blend = ice->state.cso_blend;
|
2018-01-25 02:03:18 -08:00
|
|
|
|
|
|
|
|
key->nr_color_regions = fb->nr_cbufs;
|
|
|
|
|
|
2018-01-25 02:09:59 -08:00
|
|
|
key->clamp_fragment_color = rast->clamp_fragment_color;
|
|
|
|
|
|
2019-02-20 19:39:18 +02:00
|
|
|
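/* If the fragment shader writes gl_SampleMask, the hardware disables
 * alpha-to-coverage, so the compiler uses this key bit to AND an
 * alpha-based dither mask into the shader's sample mask instead.
 */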
key->alpha_to_coverage = blend->alpha_to_coverage;
|
|
|
|
|
|
2020-12-04 08:19:57 -05:00
|
|
|
key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha_enabled;
|
2018-01-25 02:09:59 -08:00
|
|
|
|
2019-07-07 16:32:09 -07:00
|
|
|
key->flat_shade = rast->flatshade &&
|
|
|
|
|
(info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
|
2018-06-26 01:00:37 -07:00
|
|
|
|
2018-07-16 15:36:34 -07:00
|
|
|
key->persample_interp = rast->force_persample_interp;
|
|
|
|
|
key->multisample_fbo = rast->multisample && fb->samples > 1;
|
2018-01-25 02:03:18 -08:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
key->coherent_fb_fetch = GFX_VER >= 9;
|
2018-07-16 15:36:34 -07:00
|
|
|
|
2019-04-18 22:29:27 -07:00
|
|
|
key->force_dual_color_blend =
|
|
|
|
|
screen->driconf.dual_color_blend_by_location &&
|
|
|
|
|
(blend->blend_enables & 1) && blend->dual_color_blending;
|
2018-01-25 02:03:18 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-26 21:59:20 -07:00
|
|
|
static void
|
|
|
|
|
iris_populate_cs_key(const struct iris_context *ice,
|
2019-12-08 20:25:42 -08:00
|
|
|
struct iris_cs_prog_key *key)
|
2018-07-26 21:59:20 -07:00
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 20:12:37 -08:00
|
|
|
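/* Kernel Start Pointer: the offset of a shader variant's assembly within
 * the instruction memory zone, relative to the Instruction Base Address
 * programmed in STATE_BASE_ADDRESS.
 */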
static uint64_t
|
|
|
|
|
KSP(const struct iris_compiled_shader *shader)
|
|
|
|
|
{
|
2018-06-28 00:57:49 -07:00
|
|
|
struct iris_resource *res = (void *) shader->assembly.res;
|
|
|
|
|
return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
|
2018-01-25 20:12:37 -08:00
|
|
|
}
|
|
|
|
|
|
2018-11-07 22:05:14 -08:00
|
|
|
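/* Thread dispatch fields common to 3DSTATE_VS/HS/DS/GS packing. */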
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
|
2018-01-25 20:12:37 -08:00
|
|
|
pkt.KernelStartPointer = KSP(shader); \
|
2020-01-01 17:38:01 -08:00
|
|
|
pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
|
2018-01-25 01:36:49 -08:00
|
|
|
pkt.FloatingPointMode = prog_data->use_alt_mode; \
|
|
|
|
|
\
|
|
|
|
|
pkt.DispatchGRFStartRegisterForURBData = \
|
|
|
|
|
prog_data->dispatch_grf_start_reg; \
|
|
|
|
|
pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
|
|
|
|
|
pkt.prefix##URBEntryReadOffset = 0; \
|
|
|
|
|
\
|
|
|
|
|
pkt.StatisticsEnable = true; \
|
2018-11-07 22:05:14 -08:00
|
|
|
pkt.Enable = true; \
|
|
|
|
|
\
|
|
|
|
|
if (prog_data->total_scratch) { \
|
2020-10-20 14:27:58 -05:00
|
|
|
INIT_THREAD_SCRATCH_SIZE(pkt) \
|
2018-11-07 22:05:14 -08:00
|
|
|
}
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2020-10-20 14:27:58 -05:00
|
|
|
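/* INIT_THREAD_SCRATCH_SIZE sets any per-thread scratch size fields in the
 * packet, while MERGE_SCRATCH_ADDR packs a second copy of the packet with
 * only the scratch address set and ORs it into the already-packed DWords
 * via iris_emit_merge(), so the address can be supplied at draw time.
 */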
#if GFX_VERx10 >= 125
|
|
|
|
|
#define INIT_THREAD_SCRATCH_SIZE(pkt)
|
|
|
|
|
#define MERGE_SCRATCH_ADDR(name) \
|
|
|
|
|
{ \
|
|
|
|
|
uint32_t pkt2[GENX(name##_length)] = {0}; \
|
|
|
|
|
_iris_pack_command(batch, GENX(name), pkt2, p) { \
|
|
|
|
|
p.ScratchSpaceBuffer = scratch_addr >> 4; \
|
|
|
|
|
} \
|
|
|
|
|
iris_emit_merge(batch, pkt, pkt2, GENX(name##_length)); \
|
|
|
|
|
}
|
|
|
|
|
#else
|
|
|
|
|
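/* Below Xe-HP (GFX_VERx10 < 125), per-thread scratch space is encoded as
 * a power of two: a field value of n means 2^n KB.  total_scratch is a
 * power of two of at least 1KB, so ffs(total_scratch) - 11 is that log2.
 */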
#define INIT_THREAD_SCRATCH_SIZE(pkt) \
|
|
|
|
|
pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
|
2021-02-03 01:41:42 -08:00
|
|
|
#define MERGE_SCRATCH_ADDR(name) \
|
|
|
|
|
{ \
|
|
|
|
|
uint32_t pkt2[GENX(name##_length)] = {0}; \
|
|
|
|
|
_iris_pack_command(batch, GENX(name), pkt2, p) { \
|
2021-02-12 11:39:45 -08:00
|
|
|
p.ScratchSpaceBasePointer = \
|
|
|
|
|
rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE); \
|
2021-02-03 01:41:42 -08:00
|
|
|
} \
|
|
|
|
|
iris_emit_merge(batch, pkt, pkt2, GENX(name##_length)); \
|
|
|
|
|
}
|
2020-10-20 14:27:58 -05:00
|
|
|
#endif
|
2021-02-03 01:41:42 -08:00
|
|
|
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Encode most of 3DSTATE_VS based on the compiled shader.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_vs_state(const struct intel_device_info *devinfo,
|
2018-06-09 00:01:09 -07:00
|
|
|
struct iris_compiled_shader *shader)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
|
2018-01-09 11:44:04 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
|
2018-11-07 22:05:14 -08:00
|
|
|
INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
|
2018-01-25 01:36:49 -08:00
|
|
|
vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
|
|
|
|
|
vs.SIMD8DispatchEnable = true;
|
|
|
|
|
vs.UserClipDistanceCullTestEnableBitmask =
|
|
|
|
|
vue_prog_data->cull_distance_mask;
|
2018-01-09 11:51:34 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-09 11:51:34 -08:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Encode most of 3DSTATE_HS based on the compiled shader.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_tcs_state(const struct intel_device_info *devinfo,
|
2018-06-09 00:01:09 -07:00
|
|
|
struct iris_compiled_shader *shader)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
|
|
|
|
|
struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
|
2018-01-09 21:29:09 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
|
2018-11-07 22:05:14 -08:00
|
|
|
INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
|
2018-01-25 01:36:49 -08:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1604578095:
|
2020-01-21 17:54:01 +02:00
|
|
|
*
|
|
|
|
|
* Hang occurs when the number of max threads is less than twice the
|
|
|
|
|
* instance count.  The number of max threads must be more than twice
|
|
|
|
|
* the instance count.
|
|
|
|
|
*/
|
|
|
|
|
assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
|
2020-04-30 23:12:07 +00:00
|
|
|
hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
|
|
|
|
|
hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
|
2020-01-21 17:54:01 +02:00
|
|
|
#endif
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
hs.InstanceCount = tcs_prog_data->instances - 1;
|
|
|
|
|
hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
|
|
|
|
|
hs.IncludeVertexHandles = true;
|
2019-05-03 14:57:54 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 12
|
2020-01-23 22:39:35 -08:00
|
|
|
/* Patch Count threshold specifies the maximum number of patches that
|
|
|
|
|
* will be accumulated before a thread dispatch is forced.
|
|
|
|
|
*/
|
|
|
|
|
hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9
|
2019-05-03 14:57:54 -07:00
|
|
|
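/* SINGLE_PATCH or 8_PATCH, as selected by the compiler (8_PATCH can be
 * forced with INTEL_DEBUG=tcs8).
 */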
hs.DispatchMode = vue_prog_data->dispatch_mode;
|
|
|
|
|
hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
|
|
|
|
|
#endif
|
2018-01-10 00:36:44 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-10 00:36:44 -08:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_tes_state(const struct intel_device_info *devinfo,
|
2018-06-09 00:01:09 -07:00
|
|
|
struct iris_compiled_shader *shader)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
|
|
|
|
|
struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
|
2018-01-21 15:28:59 -08:00
|
|
|
|
2021-10-14 13:22:08 +02:00
|
|
|
uint32_t *ds_state = (void *) shader->derived_data;
|
|
|
|
|
uint32_t *te_state = ds_state + GENX(3DSTATE_DS_length);
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
|
|
|
|
|
INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
|
|
|
|
|
|
|
|
|
|
ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
|
|
|
|
|
ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
|
|
|
|
|
ds.ComputeWCoordinateEnable =
|
|
|
|
|
tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
|
|
|
|
|
|
2021-10-21 12:54:01 -07:00
|
|
|
#if GFX_VER >= 12
|
|
|
|
|
ds.PrimitiveIDNotRequired = !tes_prog_data->include_primitive_id;
|
|
|
|
|
#endif
|
2021-10-14 13:22:08 +02:00
|
|
|
ds.UserClipDistanceCullTestEnableBitmask =
|
|
|
|
|
vue_prog_data->cull_distance_mask;
|
|
|
|
|
}
|
2018-01-21 17:34:41 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
|
|
|
|
|
te.Partitioning = tes_prog_data->partitioning;
|
|
|
|
|
te.OutputTopology = tes_prog_data->output_topology;
|
|
|
|
|
te.TEDomain = tes_prog_data->domain;
|
|
|
|
|
te.TEEnable = true;
|
|
|
|
|
te.MaximumTessellationFactorOdd = 63.0;
|
|
|
|
|
te.MaximumTessellationFactorNotOdd = 64.0;
|
2020-06-04 11:50:35 -07:00
|
|
|
#if GFX_VERx10 >= 125
|
|
|
|
|
te.TessellationDistributionMode = TEDMODE_RR_FREE;
|
|
|
|
|
te.TessellationDistributionLevel = TEDLEVEL_PATCH;
|
|
|
|
|
/* 64_TRIANGLES */
|
|
|
|
|
te.SmallPatchThreshold = 3;
|
|
|
|
|
/* 1K_TRIANGLES */
|
|
|
|
|
te.TargetBlockSize = 8;
|
|
|
|
|
/* 1K_TRIANGLES */
|
|
|
|
|
te.LocalBOPAccumulatorThreshold = 1;
|
|
|
|
|
#endif
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
|
|
|
|
}
|
2018-01-09 21:29:09 -08:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Encode most of 3DSTATE_GS based on the compiled shader.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_gs_state(const struct intel_device_info *devinfo,
|
2018-06-09 00:01:09 -07:00
|
|
|
struct iris_compiled_shader *shader)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
|
|
|
|
|
struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
|
2018-11-07 22:05:14 -08:00
|
|
|
INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
|
2018-01-22 22:40:51 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
|
|
|
|
|
gs.OutputTopology = gs_prog_data->output_topology;
|
|
|
|
|
gs.ControlDataHeaderSize =
|
|
|
|
|
gs_prog_data->control_data_header_size_hwords;
|
|
|
|
|
gs.InstanceControl = gs_prog_data->invocations - 1;
|
2018-06-26 13:35:47 -07:00
|
|
|
gs.DispatchMode = DISPATCH_MODE_SIMD8;
|
2018-01-25 01:36:49 -08:00
|
|
|
gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
|
|
|
|
|
gs.ControlDataFormat = gs_prog_data->control_data_format;
|
|
|
|
|
gs.ReorderMode = TRAILING;
|
|
|
|
|
gs.ExpectedVertexCount = gs_prog_data->vertices_in;
|
|
|
|
|
gs.MaximumNumberofThreads =
|
2021-03-16 10:14:30 -07:00
|
|
|
GFX_VER == 8 ? (devinfo->max_gs_threads / 2 - 1)
|
2018-01-25 01:36:49 -08:00
|
|
|
: (devinfo->max_gs_threads - 1);
|
2018-01-22 22:40:51 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (gs_prog_data->static_vertex_count != -1) {
|
|
|
|
|
gs.StaticOutput = true;
|
|
|
|
|
gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
|
|
|
|
|
}
|
|
|
|
|
gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
|
2018-01-11 22:50:12 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
gs.UserClipDistanceCullTestEnableBitmask =
|
|
|
|
|
vue_prog_data->cull_distance_mask;
|
2018-01-11 22:50:12 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
const int urb_entry_write_offset = 1;
|
|
|
|
|
const uint32_t urb_entry_output_length =
|
|
|
|
|
DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
|
|
|
|
|
urb_entry_write_offset;
|
2018-01-23 01:23:54 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
|
|
|
|
|
gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-11 22:50:12 -08:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_fs_state(const struct intel_device_info *devinfo,
|
2018-06-09 00:01:09 -07:00
|
|
|
struct iris_compiled_shader *shader)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
|
2018-01-23 01:07:09 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
uint32_t *ps_state = (void *) shader->derived_data;
|
|
|
|
|
uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
|
2018-01-23 01:07:09 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
|
|
|
|
|
ps.VectorMaskEnable = true;
|
2020-01-01 17:38:01 -08:00
|
|
|
ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
|
2018-01-25 01:36:49 -08:00
|
|
|
ps.FloatingPointMode = prog_data->use_alt_mode;
|
2021-11-01 17:22:51 -07:00
|
|
|
ps.MaximumNumberofThreadsPerPSD =
|
|
|
|
|
devinfo->max_threads_per_psd - (GFX_VER == 8 ? 2 : 1);
|
2018-01-23 01:07:09 -08:00
|
|
|
|
2018-12-27 00:49:56 -08:00
|
|
|
ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
|
2018-01-23 01:07:09 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
/* From the documentation for this packet:
|
|
|
|
|
* "If the PS kernel does not need the Position XY Offsets to
|
|
|
|
|
* compute a Position Value, then this field should be programmed
|
|
|
|
|
* to POSOFFSET_NONE."
|
|
|
|
|
*
|
|
|
|
|
* "SW Recommendation: If the PS kernel needs the Position Offsets
|
|
|
|
|
* to compute a Position XY value, this field should match Position
|
|
|
|
|
* ZW Interpolation Mode to ensure a consistent position.xyzw
|
|
|
|
|
* computation."
|
|
|
|
|
*
|
|
|
|
|
* We only require XY sample offsets. So, this recommendation doesn't
|
|
|
|
|
* look useful at the moment. We might need this in future.
|
|
|
|
|
*/
|
|
|
|
|
ps.PositionXYOffsetSelect =
|
|
|
|
|
wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
|
2018-11-07 22:05:14 -08:00
|
|
|
|
2020-10-20 14:27:58 -05:00
|
|
|
if (prog_data->total_scratch) {
|
|
|
|
|
INIT_THREAD_SCRATCH_SIZE(ps);
|
|
|
|
|
}
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
|
|
|
|
|
psx.PixelShaderValid = true;
|
|
|
|
|
psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
|
2019-02-11 11:40:38 -08:00
|
|
|
psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
|
2018-01-25 01:36:49 -08:00
|
|
|
psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
|
|
|
|
|
psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
|
|
|
|
|
psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
|
|
|
|
|
psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
|
2018-11-07 14:23:27 +10:00
|
|
|
psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
|
2018-01-22 23:39:38 -08:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9
|
2018-01-25 01:36:49 -08:00
|
|
|
psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
|
|
|
|
|
psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
|
2018-11-07 14:23:27 +10:00
|
|
|
#endif
|
2018-01-11 22:50:12 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-11 22:50:12 -08:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Compute the size of the derived data (shader command packets).
|
|
|
|
|
*
|
|
|
|
|
* This must match the data written by the iris_store_xs_state() functions.
|
|
|
|
|
*/
|
2018-07-26 21:59:20 -07:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_cs_state(const struct intel_device_info *devinfo,
|
2018-07-26 21:59:20 -07:00
|
|
|
struct iris_compiled_shader *shader)
|
|
|
|
|
{
|
|
|
|
|
struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
|
|
|
|
|
void *map = shader->derived_data;
|
|
|
|
|
|
|
|
|
|
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
|
2021-03-16 10:09:00 -07:00
|
|
|
#if GFX_VERx10 < 125
|
2018-07-26 21:59:20 -07:00
|
|
|
desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
|
|
|
|
|
desc.CrossThreadConstantDataReadLength =
|
|
|
|
|
cs_prog_data->push.cross_thread.regs;
|
2019-02-13 11:11:43 -08:00
|
|
|
#else
|
|
|
|
|
assert(cs_prog_data->push.per_thread.regs == 0);
|
|
|
|
|
assert(cs_prog_data->push.cross_thread.regs == 0);
|
|
|
|
|
#endif
|
|
|
|
|
desc.BarrierEnable = cs_prog_data->uses_barrier;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2020-03-03 08:07:32 -08:00
|
|
|
/* TODO: Check if we are missing workarounds and enable mid-thread
|
|
|
|
|
* preemption.
|
|
|
|
|
*
|
|
|
|
|
* We still have issues with mid-thread preemption (it was already
|
2021-03-29 15:40:04 -07:00
|
|
|
* disabled by the kernel on gfx11, due to missing workarounds). It's
|
2020-03-03 08:07:32 -08:00
|
|
|
* possible that we are just missing some workarounds, and could enable
|
|
|
|
|
* it later, but for now let's disable it to fix a GPU hang in compute in Car
|
|
|
|
|
* Chase (and possibly more).
|
|
|
|
|
*/
|
|
|
|
|
desc.ThreadPreemptionDisable = true;
|
|
|
|
|
#endif
|
2018-07-26 21:59:20 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
static unsigned
|
|
|
|
|
iris_derived_program_state_size(enum iris_program_cache_id cache_id)
|
|
|
|
|
{
|
2018-04-21 23:27:15 -07:00
|
|
|
assert(cache_id <= IRIS_CACHE_BLORP);
|
2018-01-22 23:39:38 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
static const unsigned dwords[] = {
|
|
|
|
|
[IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
|
|
|
|
|
[IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
|
|
|
|
|
[IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
|
|
|
|
|
[IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
|
|
|
|
|
[IRIS_CACHE_FS] =
|
|
|
|
|
GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
|
2018-07-26 21:59:20 -07:00
|
|
|
[IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
|
2018-04-21 23:27:15 -07:00
|
|
|
[IRIS_CACHE_BLORP] = 0,
|
2018-01-25 01:36:49 -08:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
return sizeof(uint32_t) * dwords[cache_id];
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Create any state packets corresponding to the given shader stage
|
|
|
|
|
* (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
|
|
|
|
|
* This means that we can look up a program in the in-memory cache and
|
|
|
|
|
* get most of the state packet without having to reconstruct it.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2021-04-05 13:19:39 -07:00
|
|
|
iris_store_derived_program_state(const struct intel_device_info *devinfo,
|
2018-06-09 00:01:09 -07:00
|
|
|
enum iris_program_cache_id cache_id,
|
|
|
|
|
struct iris_compiled_shader *shader)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
|
|
|
|
switch (cache_id) {
|
|
|
|
|
case IRIS_CACHE_VS:
|
2021-02-03 01:41:42 -08:00
|
|
|
iris_store_vs_state(devinfo, shader);
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
|
|
|
|
case IRIS_CACHE_TCS:
|
2021-02-03 01:41:42 -08:00
|
|
|
iris_store_tcs_state(devinfo, shader);
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
|
|
|
|
case IRIS_CACHE_TES:
|
2021-02-03 01:41:42 -08:00
|
|
|
iris_store_tes_state(devinfo, shader);
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
|
|
|
|
case IRIS_CACHE_GS:
|
2021-02-03 01:41:42 -08:00
|
|
|
iris_store_gs_state(devinfo, shader);
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
|
|
|
|
case IRIS_CACHE_FS:
|
2021-02-03 01:41:42 -08:00
|
|
|
iris_store_fs_state(devinfo, shader);
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
|
|
|
|
case IRIS_CACHE_CS:
|
2021-02-03 01:41:42 -08:00
|
|
|
iris_store_cs_state(devinfo, shader);
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
2021-04-13 17:12:50 +02:00
|
|
|
case IRIS_CACHE_BLORP:
|
2018-01-25 01:36:49 -08:00
|
|
|
break;
|
2018-01-10 00:19:29 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* ------------------------------------------------------------------- */
|
|
|
|
|
|
2018-01-25 21:39:44 -08:00
|
|
|
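/* 3D command sub-opcodes for the per-stage 3DSTATE_CONSTANT_* packets. */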
static const uint32_t push_constant_opcodes[] = {
|
|
|
|
|
[MESA_SHADER_VERTEX] = 21,
|
|
|
|
|
[MESA_SHADER_TESS_CTRL] = 25, /* HS */
|
|
|
|
|
[MESA_SHADER_TESS_EVAL] = 26, /* DS */
|
|
|
|
|
[MESA_SHADER_GEOMETRY] = 22,
|
|
|
|
|
[MESA_SHADER_FRAGMENT] = 23,
|
|
|
|
|
[MESA_SHADER_COMPUTE] = 0,
|
|
|
|
|
};
|
|
|
|
|
|
2018-10-02 10:21:57 -07:00
|
|
|
static uint32_t
|
|
|
|
|
use_null_surface(struct iris_batch *batch, struct iris_context *ice)
|
|
|
|
|
{
|
|
|
|
|
struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
|
|
|
|
|
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
|
2018-10-02 10:21:57 -07:00
|
|
|
|
|
|
|
|
return ice->state.unbound_tex.offset;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static uint32_t
|
|
|
|
|
use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
|
|
|
|
|
{
|
2018-10-24 14:16:38 -07:00
|
|
|
/* If set_framebuffer_state() was never called, fall back to 1x1x1 */
|
|
|
|
|
if (!ice->state.null_fb.res)
|
|
|
|
|
return use_null_surface(batch, ice);
|
|
|
|
|
|
2018-10-02 10:21:57 -07:00
|
|
|
struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
|
|
|
|
|
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
|
2018-10-02 10:21:57 -07:00
|
|
|
|
|
|
|
|
return ice->state.null_fb.offset;
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-07 19:51:05 -08:00
|
|
|
static uint32_t
|
|
|
|
|
surf_state_offset_for_aux(struct iris_resource *res,
|
2019-03-27 14:42:12 -07:00
|
|
|
unsigned aux_modes,
|
2018-12-07 19:51:05 -08:00
|
|
|
enum isl_aux_usage aux_usage)
|
|
|
|
|
{
|
2020-06-01 08:48:56 -07:00
|
|
|
assert(aux_modes & (1 << aux_usage));
|
2018-12-07 19:51:05 -08:00
|
|
|
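/* SURFACE_STATEs for each enabled aux mode are stored back-to-back in bit
 * order, so the offset is the number of enabled modes below aux_usage,
 * times the per-state alignment.
 */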
return SURFACE_STATE_ALIGNMENT *
|
2019-08-19 22:36:36 -07:00
|
|
|
util_bitcount(aux_modes & ((1 << aux_usage) - 1));
|
2018-12-07 19:51:05 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 9
|
2019-03-01 10:34:40 -08:00
|
|
|
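/* Gen9 stores the fast clear color directly in the SURFACE_STATE (as a
 * float or integer interpreted via the surface format), rather than in an
 * indirect clear color buffer, so we patch it in place with PIPE_CONTROL
 * post-sync immediate writes.
 */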
static void
|
|
|
|
|
surf_state_update_clear_value(struct iris_batch *batch,
|
|
|
|
|
struct iris_resource *res,
|
|
|
|
|
struct iris_state_ref *state,
|
2019-03-27 14:42:12 -07:00
|
|
|
unsigned aux_modes,
|
2019-03-01 10:34:40 -08:00
|
|
|
enum isl_aux_usage aux_usage)
|
|
|
|
|
{
|
|
|
|
|
struct isl_device *isl_dev = &batch->screen->isl_dev;
|
|
|
|
|
struct iris_bo *state_bo = iris_resource_bo(state->res);
|
2019-08-19 13:57:46 -07:00
|
|
|
uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
|
2021-07-19 21:23:18 -07:00
|
|
|
uint32_t offset_into_bo = real_offset - state_bo->address;
|
2019-03-01 10:34:40 -08:00
|
|
|
uint32_t clear_offset = offset_into_bo +
|
|
|
|
|
isl_dev->ss.clear_value_offset +
|
2019-03-27 14:42:12 -07:00
|
|
|
surf_state_offset_for_aux(res, aux_modes, aux_usage);
|
2019-08-19 13:57:46 -07:00
|
|
|
uint32_t *color = res->aux.clear_color.u32;
|
2019-03-01 10:34:40 -08:00
|
|
|
|
2019-08-19 13:57:46 -07:00
|
|
|
assert(isl_dev->ss.clear_value_size == 16);
|
|
|
|
|
|
|
|
|
|
if (aux_usage == ISL_AUX_USAGE_HIZ) {
|
|
|
|
|
iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
|
|
|
|
|
PIPE_CONTROL_WRITE_IMMEDIATE,
|
|
|
|
|
state_bo, clear_offset, color[0]);
|
|
|
|
|
} else {
|
|
|
|
|
iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
|
|
|
|
|
PIPE_CONTROL_WRITE_IMMEDIATE,
|
|
|
|
|
state_bo, clear_offset,
|
|
|
|
|
(uint64_t) color[0] |
|
|
|
|
|
(uint64_t) color[1] << 32);
|
|
|
|
|
iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
|
|
|
|
|
PIPE_CONTROL_WRITE_IMMEDIATE,
|
|
|
|
|
state_bo, clear_offset + 8,
|
|
|
|
|
(uint64_t) color[2] |
|
|
|
|
|
(uint64_t) color[3] << 32);
|
|
|
|
|
}
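/* The immediate writes above changed SURFACE_STATE contents in place, so
 * invalidate the state cache to keep stale copies of the old clear color
 * from being used by later draws.
 */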
|
|
|
|
|
|
|
|
|
|
iris_emit_pipe_control_flush(batch,
|
|
|
|
|
"update fast clear: state cache invalidate",
|
|
|
|
|
PIPE_CONTROL_FLUSH_ENABLE |
|
|
|
|
|
PIPE_CONTROL_STATE_CACHE_INVALIDATE);
|
2019-03-01 10:34:40 -08:00
|
|
|
}
|
2019-08-23 08:12:37 -07:00
|
|
|
#endif
|
2019-03-01 10:34:40 -08:00
|
|
|
|
2019-03-19 12:47:58 -07:00
|
|
|
static void
|
2019-03-27 13:09:34 -07:00
|
|
|
update_clear_value(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
2019-03-19 12:47:58 -07:00
|
|
|
struct iris_resource *res,
|
iris: Maintain CPU-side SURFACE_STATE copies for views and surfaces.
When replacing the backing storage for texture buffers, image buffers,
and so on, we may need to update the "Surface Base Address" field in
any corresponding SURFACE_STATE. This is easier to accomplish if we
have a copy on the CPU - we can just compare the current field, update
it, and re-upload.
This patch adds a CPU-side copy to the new iris_surface_state wrapper
struct, and reworks allocation and upload to fill things out on the
CPU copy first, then upload that to the GPU when finished.
This will be necessary to fix iris_invalidate_resource bugs shortly.
Technically, we never replace the backing storage for pipe_surfaces
(render targets), so we don't need to make this change there. However,
it's nice to have surfaces, sampler views, and image views handled
similarly. Plus, if we ever wanted to swap out backing storage for
busy textures, we'd need this infrastructure.
v2: Properly free memory (caught by Andrii Simiklit)
2019-11-14 17:17:43 -08:00
|
|
|
struct iris_surface_state *surf_state,
|
iris: Fix broken aux.possible/sampler_usages bitmask handling
For renderable surfaces, we allocate SURFACE_STATEs for each bit in
res->aux.possible_usages. Sampler views use res->aux.sampler_usages.
When pinning buffers, we call surf_state_offset_for_aux() to calculate
the offset to the desired surface state. surf_state_offset_for_aux()
took an aux_modes parameter, which should be one of those two fields.
However...it was not using that parameter. It always used the broader
res->aux.possible_usages field directly.
One of the callers, update_clear_value(), was passing incorrect masks
for this parameter. It iterated through the bits in order, using
u_bit_scan(), which destructively modifies the mask. So each time we
called it, the count of bits before our selected mode was 0, which would
cause us to always update the SURFACE_STATE for ISL_AUX_USAGE_NONE,
rather than updating each in turn. This was hidden by the earlier bug
where surf_state_offset_for_aux() ignored the parameter.
Fixes: 7339660e803 ("iris: Add aux.sampler_usages.")
Reviewed-by: Rafael Antognolli <rafael.antognolli@intel.com>
2019-08-19 22:36:36 -07:00
|
|
|
unsigned all_aux_modes,
|
2019-03-27 13:09:34 -07:00
|
|
|
struct isl_view *view)
|
2019-03-19 12:47:58 -07:00
|
|
|
{
|
2019-08-19 13:52:37 -07:00
|
|
|
UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
|
2019-08-19 22:36:36 -07:00
|
|
|
UNUSED unsigned aux_modes = all_aux_modes;
|
2019-03-26 11:08:22 -07:00
|
|
|
|
2021-03-29 15:40:04 -07:00
|
|
|
/* We only need to update the clear color in the surface state for gfx8 and
|
|
|
|
|
* gfx9. Newer gens can read it directly from the clear color state buffer.
|
2019-03-26 11:08:22 -07:00
|
|
|
*/
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 9
|
2019-08-19 13:52:37 -07:00
|
|
|
/* Skip updating the ISL_AUX_USAGE_NONE surface state */
|
|
|
|
|
aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
|
2019-03-26 11:08:22 -07:00
|
|
|
|
2019-08-19 13:52:37 -07:00
|
|
|
while (aux_modes) {
|
|
|
|
|
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
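/* Pass all_aux_modes below, not the aux_modes copy being consumed here;
 * u_bit_scan() clears a bit on every call, so surface state offsets must
 * be computed against the full mask (see the bitmask fix described above).
 */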
|
2019-03-27 13:09:34 -07:00
|
|
|
|
2019-11-14 17:17:43 -08:00
|
|
|
surf_state_update_clear_value(batch, res, &surf_state->ref,
|
|
|
|
|
all_aux_modes, aux_usage);
|
2019-08-19 13:52:37 -07:00
|
|
|
}
|
2021-03-16 10:14:30 -07:00
|
|
|
#elif GFX_VER == 8
|
2019-11-14 17:17:43 -08:00
|
|
|
/* TODO: Could update rather than re-filling */
|
|
|
|
|
alloc_surface_states(surf_state, all_aux_modes);
|
|
|
|
|
|
|
|
|
|
void *map = surf_state->cpu;
|
2019-03-27 14:42:12 -07:00
|
|
|
|
2019-08-19 13:52:37 -07:00
|
|
|
while (aux_modes) {
|
|
|
|
|
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
|
2019-11-14 23:26:07 -08:00
|
|
|
fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
|
|
|
|
|
0, 0, 0);
|
2019-08-19 13:52:37 -07:00
|
|
|
map += SURFACE_STATE_ALIGNMENT;
|
2019-03-19 12:47:58 -07:00
|
|
|
}
|
2019-11-14 17:17:43 -08:00
|
|
|
|
|
|
|
|
upload_surface_states(ice->state.surface_uploader, surf_state);
|
2019-08-19 13:52:37 -07:00
|
|
|
#endif
|
2019-03-19 12:47:58 -07:00
|
|
|
}
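As a rough illustration of the CPU-side copy pattern described in the "Maintain CPU-side SURFACE_STATE copies" message above, uploading amounts to streaming a fresh chunk of GPU memory and copying the finished CPU block into it. The helper below is only a sketch: the explicit size parameter and the upload_state() helper are assumptions, not the file's actual upload_surface_states().
/* Sketch: build SURFACE_STATEs in surf_state->cpu first, then copy the
 * finished block into a newly streamed GPU buffer held by surf_state->ref.
 */
static void
upload_surface_state_copy(struct u_upload_mgr *uploader,
                          struct iris_surface_state *surf_state,
                          unsigned size)
{
   void *map = upload_state(uploader, &surf_state->ref, size, 64);
   if (map)
      memcpy(map, surf_state->cpu, size);
}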
|
|
|
|
|
|
2018-04-06 23:57:45 -07:00
|
|
|
/**
|
|
|
|
|
* Add a surface to the validation list, as well as the buffer containing
|
|
|
|
|
* the corresponding SURFACE_STATE.
|
|
|
|
|
*
|
|
|
|
|
* Returns the binding table entry (offset to SURFACE_STATE).
|
|
|
|
|
*/
|
|
|
|
|
static uint32_t
|
2019-03-27 13:09:34 -07:00
|
|
|
use_surface(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
2018-04-06 23:57:45 -07:00
|
|
|
struct pipe_surface *p_surf,
|
2018-12-07 19:51:05 -08:00
|
|
|
bool writeable,
|
2019-07-16 11:08:28 -07:00
|
|
|
enum isl_aux_usage aux_usage,
|
2020-05-29 16:38:43 -07:00
|
|
|
bool is_read_surface,
|
|
|
|
|
enum iris_domain access)
|
2018-04-06 23:57:45 -07:00
|
|
|
{
|
|
|
|
|
struct iris_surface *surf = (void *) p_surf;
|
2018-12-07 19:51:05 -08:00
|
|
|
struct iris_resource *res = (void *) p_surf->texture;
|
2019-07-16 11:08:28 -07:00
|
|
|
uint32_t offset = 0;
|
2018-04-06 23:57:45 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 8 && is_read_surface && !surf->surface_state_read.ref.res) {
|
2021-02-01 04:51:11 -08:00
|
|
|
upload_surface_states(ice->state.surface_uploader,
|
|
|
|
|
&surf->surface_state_read);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!surf->surface_state.ref.res) {
|
|
|
|
|
upload_surface_states(ice->state.surface_uploader,
|
|
|
|
|
&surf->surface_state);
|
|
|
|
|
}
|
|
|
|
|
|
2021-09-03 13:53:40 -07:00
|
|
|
if (memcmp(&res->aux.clear_color, &surf->clear_color,
|
|
|
|
|
sizeof(surf->clear_color)) != 0) {
|
|
|
|
|
update_clear_value(ice, batch, res, &surf->surface_state,
|
|
|
|
|
res->aux.possible_usages, &surf->view);
|
|
|
|
|
if (GFX_VER == 8) {
|
|
|
|
|
update_clear_value(ice, batch, res, &surf->surface_state_read,
|
|
|
|
|
res->aux.possible_usages, &surf->read_view);
|
2019-03-19 12:47:58 -07:00
|
|
|
}
|
2021-09-03 13:53:40 -07:00
|
|
|
surf->clear_color = res->aux.clear_color;
|
2019-03-01 10:34:40 -08:00
|
|
|
}
|
2018-12-08 11:52:55 -08:00
|
|
|
|
2021-09-03 13:53:40 -07:00
|
|
|
if (res->aux.clear_color_bo)
|
|
|
|
|
iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);
|
|
|
|
|
|
|
|
|
|
if (res->aux.bo)
|
|
|
|
|
iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
|
|
|
|
|
|
2021-09-03 14:01:36 -07:00
|
|
|
iris_use_pinned_bo(batch, res->bo, writeable, access);
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 8 && is_read_surface) {
|
2021-02-01 04:42:18 -08:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
|
|
|
|
} else {
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
offset = (GFX_VER == 8 && is_read_surface)
|
2019-11-14 16:06:10 -08:00
|
|
|
? surf->surface_state_read.ref.offset
|
|
|
|
|
: surf->surface_state.ref.offset;
|
2019-07-16 11:08:28 -07:00
|
|
|
|
|
|
|
|
return offset +
|
2019-03-27 14:42:12 -07:00
|
|
|
surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
|
2018-04-06 23:57:45 -07:00
|
|
|
}
|
|
|
|
|
|
2018-04-07 06:35:51 -07:00
|
|
|
static uint32_t
|
2018-12-07 19:51:05 -08:00
|
|
|
use_sampler_view(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
struct iris_sampler_view *isv)
|
2018-04-07 06:35:51 -07:00
|
|
|
{
|
2018-12-07 19:51:05 -08:00
|
|
|
enum isl_aux_usage aux_usage =
|
2019-11-22 21:52:12 -08:00
|
|
|
iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
|
2018-12-07 19:51:05 -08:00
|
|
|
|
2021-02-01 04:51:11 -08:00
|
|
|
if (!isv->surface_state.ref.res)
|
|
|
|
|
upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
|
|
|
|
|
|
2021-09-03 13:53:40 -07:00
|
|
|
if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
|
|
|
|
|
sizeof(isv->clear_color)) != 0) {
|
|
|
|
|
update_clear_value(ice, batch, isv->res, &isv->surface_state,
|
|
|
|
|
isv->res->aux.sampler_usages, &isv->view);
|
|
|
|
|
isv->clear_color = isv->res->aux.clear_color;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (isv->res->aux.clear_color_bo) {
|
|
|
|
|
iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
|
|
|
|
|
false, IRIS_DOMAIN_OTHER_READ);
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-01 10:34:40 -08:00
|
|
|
if (isv->res->aux.bo) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, isv->res->aux.bo,
|
|
|
|
|
false, IRIS_DOMAIN_OTHER_READ);
|
2019-03-01 10:34:40 -08:00
|
|
|
}
|
2018-12-08 11:52:55 -08:00
|
|
|
|
2021-02-01 04:42:18 -08:00
|
|
|
iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
|
|
|
|
|
2019-11-14 16:06:10 -08:00
|
|
|
return isv->surface_state.ref.offset +
|
2019-03-27 14:42:12 -07:00
|
|
|
surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
|
|
|
|
|
aux_usage);
|
2018-04-07 06:35:51 -07:00
|
|
|
}
|
|
|
|
|
|
2018-06-06 14:56:10 -07:00
|
|
|
static uint32_t
|
2019-04-16 23:44:15 -07:00
|
|
|
use_ubo_ssbo(struct iris_batch *batch,
|
|
|
|
|
struct iris_context *ice,
|
|
|
|
|
struct pipe_shader_buffer *buf,
|
|
|
|
|
struct iris_state_ref *surf_state,
|
2020-05-29 16:38:43 -07:00
|
|
|
bool writable, enum iris_domain access)
|
2018-06-06 14:56:10 -07:00
|
|
|
{
|
2019-09-14 23:18:20 -07:00
|
|
|
if (!buf->buffer || !surf_state->res)
|
2018-10-02 10:21:57 -07:00
|
|
|
return use_null_surface(batch, ice);
|
|
|
|
|
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-07-24 16:03:32 -07:00
|
|
|
|
|
|
|
|
return surf_state->offset;
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-30 15:49:32 -07:00
|
|
|
static uint32_t
|
|
|
|
|
use_image(struct iris_batch *batch, struct iris_context *ice,
|
2020-03-12 13:43:14 +02:00
|
|
|
struct iris_shader_state *shs, const struct shader_info *info,
|
|
|
|
|
int i)
|
2018-08-30 15:49:32 -07:00
|
|
|
{
|
2019-04-22 11:27:37 -07:00
|
|
|
struct iris_image_view *iv = &shs->image[i];
|
|
|
|
|
struct iris_resource *res = (void *) iv->base.resource;
|
|
|
|
|
|
|
|
|
|
if (!res)
|
2018-08-30 15:49:32 -07:00
|
|
|
return use_null_surface(batch, ice);
|
|
|
|
|
|
2019-04-22 11:27:37 -07:00
|
|
|
bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
|
2018-08-30 15:49:32 -07:00
|
|
|
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
|
|
|
|
|
false, IRIS_DOMAIN_NONE);
|
2018-08-30 15:49:32 -07:00
|
|
|
|
2018-12-08 11:52:55 -08:00
|
|
|
if (res->aux.bo)
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);
|
2018-12-08 11:52:55 -08:00
|
|
|
|
2020-03-12 13:43:14 +02:00
|
|
|
enum isl_aux_usage aux_usage =
|
|
|
|
|
iris_image_view_aux_usage(ice, &iv->base, info);
|
|
|
|
|
|
|
|
|
|
return iv->surface_state.ref.offset +
|
|
|
|
|
surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
|
2018-08-30 15:49:32 -07:00
|
|
|
}
|
|
|
|
|
|
2018-09-08 19:43:34 -07:00
|
|
|
#define push_bt_entry(addr) \
|
2018-09-15 14:50:26 -07:00
|
|
|
assert(addr >= binder_addr); \
|
2019-05-22 22:17:27 -07:00
|
|
|
assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
|
2018-09-15 14:50:26 -07:00
|
|
|
if (!pin_only) bt_map[s++] = (addr) - binder_addr;
|
2018-11-24 02:55:40 -08:00
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
#define bt_assert(section) \
|
|
|
|
|
if (!pin_only && shader->bt.used_mask[section] != 0) \
|
|
|
|
|
assert(shader->bt.offsets[section] == s);
|
2018-09-08 19:43:34 -07:00
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/**
|
|
|
|
|
* Populate the binding table for a given shader stage.
|
|
|
|
|
*
|
|
|
|
|
* This fills out the table of pointers to surfaces required by the shader,
|
|
|
|
|
* and also adds those buffers to the validation list so the kernel can make them
|
|
|
|
|
* resident before running our batch.
|
|
|
|
|
*/
|
2018-06-06 11:59:17 -07:00
|
|
|
static void
|
|
|
|
|
iris_populate_binding_table(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
2018-09-15 14:50:26 -07:00
|
|
|
gl_shader_stage stage,
|
2018-10-19 01:14:38 -07:00
|
|
|
bool pin_only)
|
2018-06-06 11:59:17 -07:00
|
|
|
{
|
2018-09-08 19:43:34 -07:00
|
|
|
const struct iris_binder *binder = &ice->state.binder;
|
2018-06-06 11:59:17 -07:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
|
|
|
|
if (!shader)
|
|
|
|
|
return;
|
|
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
struct iris_binding_table *bt = &shader->bt;
|
2018-11-23 19:12:36 -08:00
|
|
|
UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
|
2018-08-18 23:39:48 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2021-07-19 21:23:18 -07:00
|
|
|
uint32_t binder_addr = binder->bo->address;
|
2018-07-24 16:03:32 -07:00
|
|
|
|
2018-06-06 11:59:17 -07:00
|
|
|
uint32_t *bt_map = binder->map + binder->bt_offset[stage];
|
|
|
|
|
int s = 0;
|
|
|
|
|
|
2018-09-21 12:22:34 -07:00
|
|
|
const struct shader_info *info = iris_get_shader_info(ice, stage);
|
|
|
|
|
if (!info) {
|
|
|
|
|
/* TCS passthrough doesn't need a binding table. */
|
|
|
|
|
assert(stage == MESA_SHADER_TESS_CTRL);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-04 13:38:36 -07:00
|
|
|
if (stage == MESA_SHADER_COMPUTE &&
|
|
|
|
|
shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
|
2018-09-23 15:25:10 -07:00
|
|
|
/* surface for gl_NumWorkGroups */
|
2018-10-19 01:14:38 -07:00
|
|
|
struct iris_state_ref *grid_data = &ice->state.grid_size;
|
|
|
|
|
struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
|
|
|
|
|
IRIS_DOMAIN_OTHER_READ);
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-10-19 01:14:38 -07:00
|
|
|
push_bt_entry(grid_state->offset);
|
2018-09-23 15:25:10 -07:00
|
|
|
}
|
|
|
|
|
|
2018-06-06 11:59:17 -07:00
|
|
|
if (stage == MESA_SHADER_FRAGMENT) {
|
|
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
2018-07-30 22:59:52 -07:00
|
|
|
/* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
|
2018-07-31 10:33:35 +10:00
|
|
|
if (cso_fb->nr_cbufs) {
|
|
|
|
|
for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
|
2018-12-07 19:51:05 -08:00
|
|
|
uint32_t addr;
|
|
|
|
|
if (cso_fb->cbufs[i]) {
|
2019-03-27 13:09:34 -07:00
|
|
|
addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
|
2020-05-29 16:38:43 -07:00
|
|
|
ice->state.draw_aux_usage[i], false,
|
|
|
|
|
IRIS_DOMAIN_RENDER_WRITE);
|
2018-12-07 19:51:05 -08:00
|
|
|
} else {
|
|
|
|
|
addr = use_null_fb_surface(batch, ice);
|
|
|
|
|
}
|
2018-09-08 19:43:34 -07:00
|
|
|
push_bt_entry(addr);
|
2018-07-31 10:33:35 +10:00
|
|
|
}
|
2021-03-16 10:14:30 -07:00
|
|
|
} else if (GFX_VER < 11) {
|
2018-09-08 19:43:34 -07:00
|
|
|
uint32_t addr = use_null_fb_surface(batch, ice);
|
|
|
|
|
push_bt_entry(addr);
|
2018-07-30 22:59:52 -07:00
|
|
|
}
|
2018-06-06 11:59:17 -07:00
|
|
|
}
|
|
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
#define foreach_surface_used(index, group) \
|
|
|
|
|
bt_assert(group); \
|
|
|
|
|
for (int index = 0; index < bt->sizes[group]; index++) \
|
|
|
|
|
if (iris_group_index_to_bti(bt, group, index) != \
|
|
|
|
|
IRIS_SURFACE_NOT_USED)
|
2018-06-06 11:59:17 -07:00
|
|
|
|
2019-07-16 11:08:28 -07:00
|
|
|
foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
|
|
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
|
|
|
|
uint32_t addr;
|
|
|
|
|
if (cso_fb->cbufs[i]) {
|
|
|
|
|
addr = use_surface(ice, batch, cso_fb->cbufs[i],
|
2020-05-29 16:38:43 -07:00
|
|
|
false, ice->state.draw_aux_usage[i], true,
|
|
|
|
|
IRIS_DOMAIN_OTHER_READ);
|
2019-07-16 11:08:28 -07:00
|
|
|
push_bt_entry(addr);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_sampler_view *view = shs->textures[i];
|
2018-12-07 19:51:05 -08:00
|
|
|
uint32_t addr = view ? use_sampler_view(ice, batch, view)
|
2018-09-08 19:43:34 -07:00
|
|
|
: use_null_surface(batch, ice);
|
|
|
|
|
push_bt_entry(addr);
|
2018-06-06 11:59:17 -07:00
|
|
|
}
|
|
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
|
2020-03-12 13:43:14 +02:00
|
|
|
uint32_t addr = use_image(batch, ice, shs, info, i);
|
2018-10-10 21:44:43 -07:00
|
|
|
push_bt_entry(addr);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
|
2020-08-11 19:43:17 -05:00
|
|
|
uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
|
|
|
|
|
&shs->constbuf_surf_state[i], false,
|
|
|
|
|
IRIS_DOMAIN_OTHER_READ);
|
2019-05-28 17:33:58 -05:00
|
|
|
push_bt_entry(addr);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-23 14:17:59 -07:00
|
|
|
foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
|
|
|
|
|
uint32_t addr =
|
|
|
|
|
use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
|
2020-05-29 16:38:43 -07:00
|
|
|
shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
|
2019-05-23 14:17:59 -07:00
|
|
|
push_bt_entry(addr);
|
2018-07-24 16:03:32 -07:00
|
|
|
}
|
|
|
|
|
|
2018-06-06 11:59:17 -07:00
|
|
|
#if 0
|
2019-01-24 09:26:38 -08:00
|
|
|
/* XXX: YUV surfaces not implemented yet */
|
2018-11-23 19:12:36 -08:00
|
|
|
bt_assert(plane_start[1], ...);
|
|
|
|
|
bt_assert(plane_start[2], ...);
|
2018-06-06 11:59:17 -07:00
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-13 03:07:00 -07:00
|
|
|
static void
|
|
|
|
|
iris_use_optional_res(struct iris_batch *batch,
|
|
|
|
|
struct pipe_resource *res,
|
2020-05-29 16:38:43 -07:00
|
|
|
bool writeable,
|
|
|
|
|
enum iris_domain access)
|
2018-06-13 03:07:00 -07:00
|
|
|
{
|
|
|
|
|
if (res) {
|
|
|
|
|
struct iris_bo *bo = iris_resource_bo(res);
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, bo, writeable, access);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-09 00:50:24 -08:00
|
|
|
static void
|
2019-03-09 01:02:06 -08:00
|
|
|
pin_depth_and_stencil_buffers(struct iris_batch *batch,
|
|
|
|
|
struct pipe_surface *zsbuf,
|
|
|
|
|
struct iris_depth_stencil_alpha_state *cso_zsa)
|
2019-03-09 00:50:24 -08:00
|
|
|
{
|
|
|
|
|
if (!zsbuf)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
struct iris_resource *zres, *sres;
|
|
|
|
|
iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
|
|
|
|
|
|
|
|
|
|
if (zres) {
|
2020-05-29 16:38:43 -07:00
|
|
|
const enum iris_domain access = cso_zsa->depth_writes_enabled ?
|
|
|
|
|
IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
|
|
|
|
|
iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
|
|
|
|
|
access);
|
2019-03-09 00:50:24 -08:00
|
|
|
if (zres->aux.bo) {
|
|
|
|
|
iris_use_pinned_bo(batch, zres->aux.bo,
|
2020-05-29 16:38:43 -07:00
|
|
|
cso_zsa->depth_writes_enabled, access);
|
2019-03-09 00:50:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (sres) {
|
2020-05-29 16:38:43 -07:00
|
|
|
const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
|
|
|
|
|
IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
|
|
|
|
|
iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
|
|
|
|
|
access);
|
2019-03-09 00:50:24 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-12 11:39:45 -08:00
|
|
|
static uint32_t
|
|
|
|
|
pin_scratch_space(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct brw_stage_prog_data *prog_data,
|
|
|
|
|
gl_shader_stage stage)
|
|
|
|
|
{
|
|
|
|
|
uint32_t scratch_addr = 0;
|
|
|
|
|
|
|
|
|
|
if (prog_data->total_scratch > 0) {
|
|
|
|
|
struct iris_bo *scratch_bo =
|
|
|
|
|
iris_get_scratch_space(ice, prog_data->total_scratch, stage);
|
|
|
|
|
iris_use_pinned_bo(batch, scratch_bo, true, IRIS_DOMAIN_NONE);
|
|
|
|
|
|
2020-10-20 14:27:58 -05:00
|
|
|
#if GFX_VERx10 >= 125
|
|
|
|
|
const struct iris_state_ref *ref =
|
|
|
|
|
iris_get_scratch_surf(ice, prog_data->total_scratch);
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(ref->res),
|
|
|
|
|
false, IRIS_DOMAIN_NONE);
|
|
|
|
|
scratch_addr = ref->offset +
|
2021-07-19 21:23:18 -07:00
|
|
|
iris_resource_bo(ref->res)->address -
|
2020-10-20 14:27:58 -05:00
|
|
|
IRIS_MEMZONE_BINDLESS_START;
|
|
|
|
|
assert((scratch_addr & 0x3f) == 0 && scratch_addr < (1 << 26));
|
|
|
|
|
#else
|
2021-07-19 21:23:18 -07:00
|
|
|
scratch_addr = scratch_bo->address;
|
2020-10-20 14:27:58 -05:00
|
|
|
#endif
|
2021-02-12 11:39:45 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return scratch_addr;
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* ------------------------------------------------------------------- */
|
2018-06-13 03:07:00 -07:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Pin any BOs which were installed by a previous batch, and restored
|
|
|
|
|
* via the hardware logical context mechanism.
|
|
|
|
|
*
|
|
|
|
|
* We don't need to re-emit all state every batch - the hardware context
|
|
|
|
|
* mechanism will save and restore it for us. This includes pointers to
|
|
|
|
|
* various BOs...which won't exist unless we ask the kernel to pin them
|
|
|
|
|
* by adding them to the validation list.
|
|
|
|
|
*
|
|
|
|
|
* We can skip buffers if we've re-emitted those packets, as we're
|
|
|
|
|
* overwriting those stale pointers with new ones, and don't actually
|
|
|
|
|
* refer to the old BOs.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
2018-09-19 12:25:18 -07:00
|
|
|
iris_restore_render_saved_bos(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_draw_info *draw)
|
2018-06-13 03:07:00 -07:00
|
|
|
{
|
2018-12-04 16:38:14 -08:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
|
|
|
|
|
2018-06-25 08:25:22 -07:00
|
|
|
const uint64_t clean = ~ice->state.dirty;
|
2020-05-29 16:57:01 -07:00
|
|
|
const uint64_t stage_clean = ~ice->state.stage_dirty;
|
2018-06-13 03:07:00 -07:00
|
|
|
|
|
|
|
|
if (clean & IRIS_DIRTY_CC_VIEWPORT) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (clean & IRIS_DIRTY_BLEND_STATE) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.blend, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
2018-06-15 16:22:58 -07:00
|
|
|
if (clean & IRIS_DIRTY_SCISSOR_RECT) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.scissor, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
2018-12-02 16:06:01 -08:00
|
|
|
if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
|
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
struct iris_stream_output_target *tgt =
|
|
|
|
|
(void *) ice->state.so_target[i];
|
|
|
|
|
if (tgt) {
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
|
2020-05-29 16:38:43 -07:00
|
|
|
true, IRIS_DOMAIN_OTHER_WRITE);
|
2018-12-02 16:06:01 -08:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
|
2020-05-29 16:38:43 -07:00
|
|
|
true, IRIS_DOMAIN_OTHER_WRITE);
|
2018-12-02 16:06:01 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-13 03:07:00 -07:00
|
|
|
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
|
2018-06-13 03:07:00 -07:00
|
|
|
continue;
|
|
|
|
|
|
2018-08-18 23:39:48 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2018-06-13 03:07:00 -07:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
|
|
|
|
|
|
|
|
|
if (!shader)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
|
|
|
|
|
|
|
|
|
|
if (range->length == 0)
|
|
|
|
|
continue;
|
|
|
|
|
|
2019-05-22 22:17:27 -07:00
|
|
|
/* Range block is a binding table index, map back to UBO index. */
|
2019-05-23 14:17:59 -07:00
|
|
|
unsigned block_index = iris_bti_to_group_index(
|
|
|
|
|
&shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
|
|
|
|
|
assert(block_index != IRIS_SURFACE_NOT_USED);
|
2019-05-22 22:17:27 -07:00
|
|
|
|
|
|
|
|
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
|
2019-04-16 23:44:15 -07:00
|
|
|
struct iris_resource *res = (void *) cbuf->buffer;
|
2018-06-13 03:07:00 -07:00
|
|
|
|
2018-06-19 23:37:10 -07:00
|
|
|
if (res)
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
|
2018-06-19 23:37:10 -07:00
|
|
|
else
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
|
|
|
|
|
IRIS_DOMAIN_OTHER_READ);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-15 14:50:26 -07:00
|
|
|
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
|
2018-09-15 14:50:26 -07:00
|
|
|
/* Re-pin any buffers referred to by the binding table. */
|
2018-10-19 01:14:38 -07:00
|
|
|
iris_populate_binding_table(ice, batch, stage, true);
|
2018-09-15 14:50:26 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-13 03:07:00 -07:00
|
|
|
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
|
|
|
|
struct pipe_resource *res = shs->sampler_table.res;
|
2018-06-13 03:07:00 -07:00
|
|
|
if (res)
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
|
2018-06-13 03:07:00 -07:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
2018-12-12 01:41:39 -08:00
|
|
|
|
2018-06-28 00:57:49 -07:00
|
|
|
if (shader) {
|
|
|
|
|
struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
|
2018-06-13 03:07:00 -07:00
|
|
|
|
2021-02-12 11:39:45 -08:00
|
|
|
pin_scratch_space(ice, batch, shader->prog_data, stage);
|
2018-12-12 01:41:39 -08:00
|
|
|
}
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-09 01:02:06 -08:00
|
|
|
if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
|
|
|
|
|
(clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
|
2018-06-13 03:07:00 -07:00
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
2019-03-09 01:02:06 -08:00
|
|
|
pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
|
2020-02-05 19:42:04 -08:00
|
|
|
IRIS_DOMAIN_VF_READ);
|
2018-06-13 03:07:00 -07:00
|
|
|
|
|
|
|
|
if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
|
2018-12-04 16:38:14 -08:00
|
|
|
uint64_t bound = ice->state.bound_vertex_buffers;
|
|
|
|
|
while (bound) {
|
|
|
|
|
const int i = u_bit_scan64(&bound);
|
|
|
|
|
struct pipe_resource *res = genx->vertex_buffers[i].resource;
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(res), false,
|
2020-02-05 19:42:04 -08:00
|
|
|
IRIS_DOMAIN_VF_READ);
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
}
|
2021-10-06 14:45:02 -07:00
|
|
|
|
|
|
|
|
#if GFX_VERx10 == 125
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(ice->state.pixel_hashing_tables),
|
|
|
|
|
false, IRIS_DOMAIN_NONE);
|
|
|
|
|
#else
|
|
|
|
|
assert(!ice->state.pixel_hashing_tables);
|
|
|
|
|
#endif
|
2018-06-13 03:07:00 -07:00
|
|
|
}
|
|
|
|
|
|
2018-09-19 12:25:18 -07:00
|
|
|
static void
|
|
|
|
|
iris_restore_compute_saved_bos(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_grid_info *grid)
|
|
|
|
|
{
|
2020-05-29 16:57:01 -07:00
|
|
|
const uint64_t stage_clean = ~ice->state.stage_dirty;
|
2018-09-19 12:25:18 -07:00
|
|
|
|
|
|
|
|
const int stage = MESA_SHADER_COMPUTE;
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
|
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
|
2018-09-19 12:25:18 -07:00
|
|
|
/* Re-pin any buffers referred to by the binding table. */
|
2018-10-19 01:14:38 -07:00
|
|
|
iris_populate_binding_table(ice, batch, stage, true);
|
2018-09-19 12:25:18 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct pipe_resource *sampler_res = shs->sampler_table.res;
|
|
|
|
|
if (sampler_res)
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-09-19 12:25:18 -07:00
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
|
|
|
|
|
(stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
|
|
|
|
|
(stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
|
|
|
|
|
(stage_clean & IRIS_STAGE_DIRTY_CS)) {
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2019-06-26 23:56:45 -07:00
|
|
|
}
|
|
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_clean & IRIS_STAGE_DIRTY_CS) {
|
2018-09-19 12:25:18 -07:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
2018-12-12 01:41:39 -08:00
|
|
|
|
2018-09-19 12:25:18 -07:00
|
|
|
if (shader) {
|
|
|
|
|
struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
|
2018-09-19 12:25:18 -07:00
|
|
|
|
2021-03-16 10:09:00 -07:00
|
|
|
if (GFX_VERx10 < 125) {
|
2019-02-13 11:11:43 -08:00
|
|
|
struct iris_bo *curbe_bo =
|
|
|
|
|
iris_resource_bo(ice->state.last_res.cs_thread_ids);
|
|
|
|
|
iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);
|
|
|
|
|
}
|
2019-06-26 23:38:59 -07:00
|
|
|
|
2021-02-12 11:39:45 -08:00
|
|
|
pin_scratch_space(ice, batch, shader->prog_data, stage);
|
2018-12-12 01:41:39 -08:00
|
|
|
}
|
2018-09-19 12:25:18 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-08 19:43:34 -07:00
|
|
|
/**
|
|
|
|
|
* Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
iris_update_surface_base_address(struct iris_batch *batch,
|
|
|
|
|
struct iris_binder *binder)
|
|
|
|
|
{
|
2021-07-19 21:23:18 -07:00
|
|
|
if (batch->last_surface_base_address == binder->bo->address)
|
2018-09-08 19:43:34 -07:00
|
|
|
return;
|
|
|
|
|
|
2020-10-07 07:44:56 -07:00
|
|
|
struct isl_device *isl_dev = &batch->screen->isl_dev;
|
2020-12-14 11:11:59 +02:00
|
|
|
uint32_t mocs = isl_mocs(isl_dev, 0, false);
|
2019-11-05 15:08:01 -08:00
|
|
|
|
2020-04-23 17:58:48 -07:00
|
|
|
iris_batch_sync_region_start(batch);
|
|
|
|
|
|
2019-09-03 15:34:54 -07:00
|
|
|
flush_before_state_base_change(batch);
|
2018-09-08 19:43:34 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 12
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1607854226:
|
2020-01-13 17:50:06 +02:00
|
|
|
*
|
|
|
|
|
* Workaround the non-pipelined state not applying in MEDIA/GPGPU pipeline
|
|
|
|
|
* mode by putting the pipeline temporarily in 3D mode.
|
|
|
|
|
*/
|
2020-02-20 11:02:52 -08:00
|
|
|
if (batch->name == IRIS_BATCH_COMPUTE)
|
|
|
|
|
emit_pipeline_select(batch, _3D);
|
2020-01-13 17:50:06 +02:00
|
|
|
#endif
|
|
|
|
|
|
2018-09-08 19:43:34 -07:00
|
|
|
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
|
|
|
|
|
sba.SurfaceStateBaseAddressModifyEnable = true;
|
|
|
|
|
sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
|
2019-08-22 16:52:52 -07:00
|
|
|
|
|
|
|
|
/* The hardware appears to pay attention to the MOCS fields even
|
|
|
|
|
* if you don't set the "Address Modify Enable" bit for the base.
|
|
|
|
|
*/
|
2019-11-05 15:08:01 -08:00
|
|
|
sba.GeneralStateMOCS = mocs;
|
|
|
|
|
sba.StatelessDataPortAccessMOCS = mocs;
|
|
|
|
|
sba.DynamicStateMOCS = mocs;
|
|
|
|
|
sba.IndirectObjectMOCS = mocs;
|
|
|
|
|
sba.InstructionMOCS = mocs;
|
|
|
|
|
sba.SurfaceStateMOCS = mocs;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9
|
2019-11-05 15:08:01 -08:00
|
|
|
sba.BindlessSurfaceStateMOCS = mocs;
|
2021-10-18 22:56:48 -07:00
|
|
|
#endif
|
|
|
|
|
#if GFX_VER >= 11
|
|
|
|
|
sba.BindlessSamplerStateMOCS = mocs;
|
2019-08-22 16:52:52 -07:00
|
|
|
#endif
|
2018-09-08 19:43:34 -07:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 12
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1607854226:
|
2020-01-13 17:50:06 +02:00
|
|
|
*
|
|
|
|
|
* Put the pipeline back into compute mode.
|
|
|
|
|
*/
|
2020-02-20 11:02:52 -08:00
|
|
|
if (batch->name == IRIS_BATCH_COMPUTE)
|
|
|
|
|
emit_pipeline_select(batch, GPGPU);
|
2020-01-13 17:50:06 +02:00
|
|
|
#endif
|
|
|
|
|
|
2018-10-09 14:14:15 -07:00
|
|
|
if (GFX_VERx10 >= 125) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POOL_ALLOC), btpa) {
|
|
|
|
|
btpa.BindingTablePoolBaseAddress = ro_bo(binder->bo, 0);
|
|
|
|
|
btpa.BindingTablePoolBufferSize = IRIS_BINDER_SIZE / 4096;
|
|
|
|
|
#if GFX_VERx10 < 125
|
|
|
|
|
btpa.BindingTablePoolEnable = true;
|
|
|
|
|
#endif
|
|
|
|
|
btpa.MOCS = mocs;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-03 15:34:54 -07:00
|
|
|
flush_after_state_base_change(batch);
|
2020-04-23 17:58:48 -07:00
|
|
|
iris_batch_sync_region_end(batch);
|
2019-09-03 15:34:54 -07:00
|
|
|
|
2021-07-19 21:23:18 -07:00
|
|
|
batch->last_surface_base_address = binder->bo->address;
|
2018-09-08 19:43:34 -07:00
|
|
|
}
|
|
|
|
|
|
2019-07-25 13:09:08 +03:00
|
|
|
static inline void
|
|
|
|
|
iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
|
|
|
|
|
bool window_space_position, float *zmin, float *zmax)
|
|
|
|
|
{
|
|
|
|
|
if (window_space_position) {
|
|
|
|
|
*zmin = 0.f;
|
|
|
|
|
*zmax = 1.f;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2018-04-27 16:39:30 -07:00
|
|
|
void
|
2020-02-25 15:04:08 -08:00
|
|
|
genX(invalidate_aux_map_state)(struct iris_batch *batch)
|
2018-04-27 16:39:30 -07:00
|
|
|
{
|
|
|
|
|
struct iris_screen *screen = batch->screen;
|
|
|
|
|
void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
|
|
|
|
|
if (!aux_map_ctx)
|
|
|
|
|
return;
|
2021-03-03 13:49:18 -08:00
|
|
|
uint32_t aux_map_state_num = intel_aux_map_get_state_num(aux_map_ctx);
|
2018-04-27 16:39:30 -07:00
|
|
|
if (batch->last_aux_map_state != aux_map_state_num) {
|
2020-02-25 15:08:32 -08:00
|
|
|
/* HSD 1209978178: docs say that before programming the aux table:
|
|
|
|
|
*
|
|
|
|
|
* "Driver must ensure that the engine is IDLE but ensure it doesn't
|
|
|
|
|
* add extra flushes in the case it knows that the engine is already
|
|
|
|
|
* IDLE."
|
|
|
|
|
*
|
|
|
|
|
* An end of pipe sync is needed here, otherwise we see GPU hangs in
|
|
|
|
|
* dEQP-GLES31.functional.copy_image.* tests.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
|
|
|
|
|
PIPE_CONTROL_CS_STALL);
|
|
|
|
|
|
2018-04-27 16:39:30 -07:00
|
|
|
/* If the aux-map state number increased, then we need to rewrite the
|
|
|
|
|
* register. Rewriting the register is used to both set the aux-map
|
|
|
|
|
* translation table address, and also to invalidate any previously
|
|
|
|
|
* cached translations.
|
|
|
|
|
*/
|
2020-02-25 15:04:08 -08:00
|
|
|
iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
|
2018-04-27 16:39:30 -07:00
|
|
|
batch->last_aux_map_state = aux_map_state_num;
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-02-25 15:04:08 -08:00
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
init_aux_map_state(struct iris_batch *batch)
|
|
|
|
|
{
|
|
|
|
|
struct iris_screen *screen = batch->screen;
|
|
|
|
|
void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
|
|
|
|
|
if (!aux_map_ctx)
|
|
|
|
|
return;
|
|
|
|
|
|
2021-03-03 13:49:18 -08:00
|
|
|
uint64_t base_addr = intel_aux_map_get_base(aux_map_ctx);
|
2020-02-25 15:04:08 -08:00
|
|
|
assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
|
|
|
|
|
iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
|
|
|
|
|
base_addr);
|
|
|
|
|
}
|
2018-04-27 16:39:30 -07:00
|
|
|
#endif
|
|
|
|
|
|
2019-09-23 10:15:52 -07:00
|
|
|
struct push_bos {
|
|
|
|
|
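/* At most four push ranges: one per constant buffer slot in the
 * 3DSTATE_CONSTANT_* read length / buffer arrays.
 */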
struct {
|
|
|
|
|
struct iris_address addr;
|
|
|
|
|
uint32_t length;
|
|
|
|
|
} buffers[4];
|
|
|
|
|
int buffer_count;
|
2019-09-23 13:25:01 -07:00
|
|
|
uint32_t max_length;
|
2019-09-23 10:15:52 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
setup_constant_buffers(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
int stage,
|
|
|
|
|
struct push_bos *push_bos)
|
|
|
|
|
{
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
|
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
|
|
|
|
struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
|
|
|
|
|
|
2019-12-03 11:15:38 -08:00
|
|
|
uint32_t push_range_sum = 0;
|
|
|
|
|
|
2019-09-23 10:15:52 -07:00
|
|
|
int n = 0;
|
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
|
|
|
|
|
|
|
|
|
|
if (range->length == 0)
|
|
|
|
|
continue;
|
|
|
|
|
|
2019-12-03 11:15:38 -08:00
|
|
|
push_range_sum += range->length;
|
|
|
|
|
|
2019-09-23 13:25:01 -07:00
|
|
|
if (range->length > push_bos->max_length)
|
|
|
|
|
push_bos->max_length = range->length;
|
|
|
|
|
|
2019-09-23 10:15:52 -07:00
|
|
|
/* Range block is a binding table index, map back to UBO index. */
|
|
|
|
|
unsigned block_index = iris_bti_to_group_index(
|
|
|
|
|
&shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
|
|
|
|
|
assert(block_index != IRIS_SURFACE_NOT_USED);
|
|
|
|
|
|
|
|
|
|
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
|
|
|
|
|
struct iris_resource *res = (void *) cbuf->buffer;
|
|
|
|
|
|
|
|
|
|
assert(cbuf->buffer_offset % 32 == 0);
|
|
|
|
|
|
|
|
|
|
push_bos->buffers[n].length = range->length;
|
|
|
|
|
push_bos->buffers[n].addr =
|
|
|
|
|
res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
|
2020-02-21 18:06:18 +02:00
|
|
|
: batch->screen->workaround_address;
|
2019-09-23 10:15:52 -07:00
|
|
|
n++;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-03 11:15:38 -08:00
|
|
|
/* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
|
|
|
|
|
*
|
|
|
|
|
* "The sum of all four read length fields must be less than or
|
|
|
|
|
* equal to the size of 64."
|
|
|
|
|
*/
|
|
|
|
|
assert(push_range_sum <= 64);
|
|
|
|
|
|
2019-09-23 10:15:52 -07:00
|
|
|
push_bos->buffer_count = n;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
emit_push_constant_packets(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
int stage,
|
|
|
|
|
const struct push_bos *push_bos)
|
|
|
|
|
{
|
2020-02-05 00:53:10 -08:00
|
|
|
UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
|
2019-09-23 10:15:52 -07:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
|
|
|
|
struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
|
|
|
|
|
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
|
|
|
|
|
pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
|
2020-05-13 15:08:33 -07:00
|
|
|
|
|
|
|
|
#if GFX_VER >= 9
|
2020-12-14 11:11:59 +02:00
|
|
|
pkt.MOCS = isl_mocs(isl_dev, 0, false);
|
2020-02-05 00:53:10 -08:00
|
|
|
#endif
|
2020-05-13 15:08:33 -07:00
|
|
|
|
2019-09-23 10:15:52 -07:00
|
|
|
if (prog_data) {
|
|
|
|
|
/* The Skylake PRM contains the following restriction:
|
|
|
|
|
*
|
|
|
|
|
* "The driver must ensure The following case does not occur
|
|
|
|
|
* without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
|
|
|
|
|
* buffer 3 read length equal to zero committed followed by a
|
|
|
|
|
* 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
|
|
|
|
|
* zero committed."
|
|
|
|
|
*
|
|
|
|
|
* To avoid this, we program the buffers in the highest slots.
|
|
|
|
|
* This way, slot 0 is only used if slot 3 is also used.
|
|
|
|
|
*/
|
|
|
|
|
int n = push_bos->buffer_count;
|
|
|
|
|
assert(n <= 4);
|
|
|
|
|
const unsigned shift = 4 - n;
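/* For example, with n == 2 the shift is 2, so the two buffers land in
 * slots 2 and 3 while slots 0 and 1 keep a read length of zero, which
 * satisfies the restriction quoted above.
 */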
|
|
|
|
|
for (int i = 0; i < n; i++) {
|
|
|
|
|
pkt.ConstantBody.ReadLength[i + shift] =
|
|
|
|
|
push_bos->buffers[i].length;
|
|
|
|
|
pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-09-23 13:25:01 -07:00
|
|
|
static void
|
|
|
|
|
emit_push_constant_packet_all(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
uint32_t shader_mask,
|
|
|
|
|
const struct push_bos *push_bos)
|
|
|
|
|
{
|
2020-02-05 00:53:10 -08:00
|
|
|
struct isl_device *isl_dev = &batch->screen->isl_dev;
|
|
|
|
|
|
2019-09-23 13:25:01 -07:00
|
|
|
if (!push_bos) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
|
|
|
|
|
pc.ShaderUpdateEnable = shader_mask;
|
2021-10-19 05:31:55 -07:00
|
|
|
pc.MOCS = iris_mocs(NULL, isl_dev, 0);
|
2019-09-23 13:25:01 -07:00
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const uint32_t n = push_bos->buffer_count;
|
|
|
|
|
const uint32_t max_pointers = 4;
|
|
|
|
|
const uint32_t num_dwords = 2 + 2 * n;
|
|
|
|
|
uint32_t const_all[2 + 2 * max_pointers];
|
|
|
|
|
uint32_t *dw = &const_all[0];
|
|
|
|
|
|
|
|
|
|
assert(n <= max_pointers);
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
|
|
|
|
|
all.DWordLength = num_dwords - 2;
|
2020-12-14 11:11:59 +02:00
|
|
|
all.MOCS = isl_mocs(isl_dev, 0, false);
|
2019-09-23 13:25:01 -07:00
|
|
|
all.ShaderUpdateEnable = shader_mask;
|
|
|
|
|
all.PointerBufferMask = (1 << n) - 1;
|
|
|
|
|
}
|
|
|
|
|
dw += 2;
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < n; i++) {
|
|
|
|
|
_iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
|
|
|
|
|
dw + i * 2, data) {
|
|
|
|
|
data.PointerToConstantBuffer = push_bos->buffers[i].addr;
|
|
|
|
|
data.ConstantBufferReadLength = push_bos->buffers[i].length;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-06-15 08:37:56 -07:00
|
|
|
void
|
|
|
|
|
genX(emit_depth_state_workarounds)(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct isl_surf *surf)
|
|
|
|
|
{
|
|
|
|
|
#if GFX_VERx10 == 120
|
|
|
|
|
const bool fmt_is_d16 = surf->format == ISL_FORMAT_R16_UNORM;
|
|
|
|
|
|
2021-06-16 10:22:48 -07:00
|
|
|
switch (ice->state.genx->depth_reg_mode) {
|
|
|
|
|
case IRIS_DEPTH_REG_MODE_HW_DEFAULT:
|
|
|
|
|
if (!fmt_is_d16)
|
|
|
|
|
return;
|
|
|
|
|
break;
|
|
|
|
|
case IRIS_DEPTH_REG_MODE_D16:
|
|
|
|
|
if (fmt_is_d16)
|
|
|
|
|
return;
|
|
|
|
|
break;
|
|
|
|
|
case IRIS_DEPTH_REG_MODE_UNKNOWN:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-15 08:37:56 -07:00
|
|
|
/* We'll change some CHICKEN registers depending on the depth surface
|
|
|
|
|
* format. Do a depth flush and stall so the pipeline is not using these
|
|
|
|
|
* settings while we change the registers.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_end_of_pipe_sync(batch,
|
|
|
|
|
"Workaround: Stop pipeline for 14010455700",
|
|
|
|
|
PIPE_CONTROL_DEPTH_STALL |
|
|
|
|
|
PIPE_CONTROL_DEPTH_CACHE_FLUSH);
|
|
|
|
|
|
|
|
|
|
/* Wa_14010455700
|
|
|
|
|
*
|
|
|
|
|
* To avoid sporadic corruptions “Set 0x7010[9] when Depth Buffer
|
|
|
|
|
* Surface Format is D16_UNORM , surface type is not NULL & 1X_MSAA”.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_reg(batch, GENX(COMMON_SLICE_CHICKEN1), reg) {
|
|
|
|
|
reg.HIZPlaneOptimizationdisablebit = fmt_is_d16 && surf->samples == 1;
|
|
|
|
|
reg.HIZPlaneOptimizationdisablebitMask = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Wa_1806527549
|
|
|
|
|
*
|
|
|
|
|
* Set HIZ_CHICKEN (7018h) bit 13 = 1 when depth buffer is D16_UNORM.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_reg(batch, GENX(HIZ_CHICKEN), reg) {
|
|
|
|
|
reg.HZDepthTestLEGEOptimizationDisable = fmt_is_d16;
|
|
|
|
|
reg.HZDepthTestLEGEOptimizationDisableMask = true;
|
|
|
|
|
}
|
2021-06-16 10:22:48 -07:00
|
|
|
|
|
|
|
|
ice->state.genx->depth_reg_mode =
|
|
|
|
|
fmt_is_d16 ? IRIS_DEPTH_REG_MODE_D16 : IRIS_DEPTH_REG_MODE_HW_DEFAULT;
|
2021-06-15 08:37:56 -07:00
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2018-09-01 00:58:29 -07:00
|
|
|
iris_upload_dirty_render_state(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_draw_info *draw)
|
2018-01-25 01:36:49 -08:00
|
|
|
{
|
2018-06-25 08:25:22 -07:00
|
|
|
const uint64_t dirty = ice->state.dirty;
|
2020-05-29 16:57:01 -07:00
|
|
|
const uint64_t stage_dirty = ice->state.stage_dirty;
|
2018-01-25 01:36:49 -08:00
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
|
|
|
|
|
!(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
|
2018-09-01 00:58:29 -07:00
|
|
|
return;
|
|
|
|
|
|
2018-06-29 12:58:31 -07:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
2018-09-08 19:43:34 -07:00
|
|
|
struct iris_binder *binder = &ice->state.binder;
|
2018-01-25 01:36:49 -08:00
|
|
|
struct brw_wm_prog_data *wm_prog_data = (void *)
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
|
|
|
|
|
|
|
|
|
|
if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
|
2018-07-14 01:29:33 -07:00
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
uint32_t cc_vp_address;
|
|
|
|
|
|
|
|
|
|
/* XXX: could avoid streaming for depth_clip [0,1] case. */
|
|
|
|
|
uint32_t *cc_vp_map =
|
|
|
|
|
stream_state(batch, ice->state.dynamic_uploader,
|
|
|
|
|
&ice->state.last_res.cc_vp,
|
|
|
|
|
4 * ice->state.num_viewports *
|
|
|
|
|
GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
|
|
|
|
|
for (int i = 0; i < ice->state.num_viewports; i++) {
|
|
|
|
|
float zmin, zmax;
|
2019-08-12 10:44:41 +03:00
|
|
|
iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
|
2019-07-25 13:09:08 +03:00
|
|
|
ice->state.window_space_position,
|
2019-08-12 10:44:41 +03:00
|
|
|
&zmin, &zmax);
|
2018-07-14 01:29:33 -07:00
|
|
|
if (cso_rast->depth_clip_near)
|
|
|
|
|
zmin = 0.0;
|
|
|
|
|
if (cso_rast->depth_clip_far)
|
|
|
|
|
zmax = 1.0;
|
|
|
|
|
|
|
|
|
|
iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
|
|
|
|
|
ccv.MinimumDepth = zmin;
|
|
|
|
|
ccv.MaximumDepth = zmax;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cc_vp_map += GENX(CC_VIEWPORT_length);
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
|
2018-07-14 01:29:33 -07:00
|
|
|
ptr.CCViewportPointer = cc_vp_address;
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
|
2018-12-03 02:02:49 -08:00
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
|
|
|
|
uint32_t sf_cl_vp_address;
|
|
|
|
|
uint32_t *vp_map =
|
|
|
|
|
stream_state(batch, ice->state.dynamic_uploader,
|
|
|
|
|
&ice->state.last_res.sf_cl_vp,
|
|
|
|
|
4 * ice->state.num_viewports *
|
|
|
|
|
GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < ice->state.num_viewports; i++) {
|
|
|
|
|
const struct pipe_viewport_state *state = &ice->state.viewports[i];
|
2018-12-03 02:08:23 -08:00
|
|
|
float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
|
2018-12-03 02:02:49 -08:00
|
|
|
|
|
|
|
|
float vp_xmin = viewport_extent(state, 0, -1.0f);
|
|
|
|
|
float vp_xmax = viewport_extent(state, 0, 1.0f);
|
|
|
|
|
float vp_ymin = viewport_extent(state, 1, -1.0f);
|
|
|
|
|
float vp_ymax = viewport_extent(state, 1, 1.0f);
|
|
|
|
|
|
2021-03-03 13:49:18 -08:00
|
|
|
intel_calculate_guardband_size(cso_fb->width, cso_fb->height,
|
2021-03-09 09:44:02 -08:00
|
|
|
state->scale[0], state->scale[1],
|
|
|
|
|
state->translate[0], state->translate[1],
|
|
|
|
|
&gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
|
2018-12-03 02:08:23 -08:00
|
|
|
|
2018-12-03 02:02:49 -08:00
|
|
|
iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
|
|
|
|
|
vp.ViewportMatrixElementm00 = state->scale[0];
|
|
|
|
|
vp.ViewportMatrixElementm11 = state->scale[1];
|
|
|
|
|
vp.ViewportMatrixElementm22 = state->scale[2];
|
|
|
|
|
vp.ViewportMatrixElementm30 = state->translate[0];
|
|
|
|
|
vp.ViewportMatrixElementm31 = state->translate[1];
|
|
|
|
|
vp.ViewportMatrixElementm32 = state->translate[2];
|
2018-12-03 02:08:23 -08:00
|
|
|
vp.XMinClipGuardband = gb_xmin;
|
|
|
|
|
vp.XMaxClipGuardband = gb_xmax;
|
|
|
|
|
vp.YMinClipGuardband = gb_ymin;
|
|
|
|
|
vp.YMaxClipGuardband = gb_ymax;
|
2018-12-03 02:02:49 -08:00
|
|
|
vp.XMinViewPort = MAX2(vp_xmin, 0);
|
|
|
|
|
vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
|
|
|
|
|
vp.YMinViewPort = MAX2(vp_ymin, 0);
|
|
|
|
|
vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
vp_map += GENX(SF_CLIP_VIEWPORT_length);
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
|
2018-12-03 02:02:49 -08:00
|
|
|
ptr.SFClipViewportPointer = sf_cl_vp_address;
|
2018-01-10 00:19:29 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_URB) {
|
2019-03-06 13:27:28 -08:00
|
|
|
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
|
|
|
|
|
if (!ice->shaders.prog[i]) {
|
2021-01-25 22:25:51 -08:00
|
|
|
ice->shaders.urb.size[i] = 1;
|
2019-03-06 13:27:28 -08:00
|
|
|
} else {
|
|
|
|
|
struct brw_vue_prog_data *vue_prog_data =
|
|
|
|
|
(void *) ice->shaders.prog[i]->prog_data;
|
2021-01-25 22:25:51 -08:00
|
|
|
ice->shaders.urb.size[i] = vue_prog_data->urb_entry_size;
|
2019-03-06 13:27:28 -08:00
|
|
|
}
|
2021-01-25 22:25:51 -08:00
|
|
|
assert(ice->shaders.urb.size[i] != 0);
|
2019-03-06 13:27:28 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-03 13:49:18 -08:00
|
|
|
intel_get_urb_config(&batch->screen->devinfo,
|
2021-03-09 09:44:02 -08:00
|
|
|
batch->screen->l3_config_3d,
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
|
|
|
|
|
ice->shaders.urb.size,
|
|
|
|
|
ice->shaders.urb.entries,
|
|
|
|
|
ice->shaders.urb.start,
|
|
|
|
|
&ice->state.urb_deref_block_size,
|
|
|
|
|
&ice->shaders.urb.constrained);
|
2020-01-17 13:38:52 -06:00
|
|
|
|
|
|
|
|
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
|
|
|
|
|
urb._3DCommandSubOpcode += i;
|
2021-01-25 22:25:51 -08:00
|
|
|
urb.VSURBStartingAddress = ice->shaders.urb.start[i];
|
|
|
|
|
urb.VSURBEntryAllocationSize = ice->shaders.urb.size[i] - 1;
|
|
|
|
|
urb.VSNumberofURBEntries = ice->shaders.urb.entries[i];
|
2020-01-17 13:38:52 -06:00
|
|
|
}
|
|
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
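/* Stream out a fresh BLEND_STATE table (a header plus one entry per
 * render target), merge in the dynamic alpha-test bits from the ZSA
 * state, and point the hardware at it.
 */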
if (dirty & IRIS_DIRTY_BLEND_STATE) {
|
2018-01-25 21:53:41 -08:00
|
|
|
struct iris_blend_state *cso_blend = ice->state.cso_blend;
|
2018-01-30 01:50:44 -08:00
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
2018-01-25 21:53:41 -08:00
|
|
|
struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
|
2018-08-29 15:24:45 -07:00
|
|
|
const int header_dwords = GENX(BLEND_STATE_length);
|
2019-02-15 14:22:52 -08:00
|
|
|
|
|
|
|
|
/* Always write at least one BLEND_STATE - the final RT message will
|
|
|
|
|
* reference BLEND_STATE[0] even if there aren't color writes. There
|
|
|
|
|
* may still be alpha testing, computed depth, and so on.
|
|
|
|
|
*/
|
|
|
|
|
const int rt_dwords =
|
|
|
|
|
MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
|
|
|
|
|
|
2018-01-25 21:53:41 -08:00
|
|
|
uint32_t blend_offset;
|
|
|
|
|
uint32_t *blend_map =
|
2018-06-15 11:55:28 -07:00
|
|
|
stream_state(batch, ice->state.dynamic_uploader,
|
|
|
|
|
&ice->state.last_res.blend,
|
2018-08-29 15:24:45 -07:00
|
|
|
4 * (header_dwords + rt_dwords), 64, &blend_offset);
|
2018-01-25 21:53:41 -08:00
|
|
|
|
|
|
|
|
uint32_t blend_state_header;
|
|
|
|
|
iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
|
2020-12-04 08:19:57 -05:00
|
|
|
bs.AlphaTestEnable = cso_zsa->alpha_enabled;
|
|
|
|
|
bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha_func);
|
2018-01-25 21:53:41 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
blend_map[0] = blend_state_header | cso_blend->blend_state[0];
|
2018-08-29 15:24:45 -07:00
|
|
|
memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
|
2018-01-25 21:53:41 -08:00
|
|
|
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
|
|
|
|
|
ptr.BlendStatePointer = blend_offset;
|
|
|
|
|
ptr.BlendStatePointerValid = true;
|
|
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
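/* COLOR_CALC_STATE holds the alpha test reference value, the blend
 * constant color, and (on Gfx8) the stencil reference values.
 */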
if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
|
|
|
|
|
struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2018-11-07 14:23:27 +10:00
|
|
|
struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
|
|
|
|
|
#endif
|
2018-01-25 01:36:49 -08:00
|
|
|
uint32_t cc_offset;
|
|
|
|
|
void *cc_map =
|
2018-04-06 00:05:24 -07:00
|
|
|
stream_state(batch, ice->state.dynamic_uploader,
|
2018-06-15 11:55:28 -07:00
|
|
|
&ice->state.last_res.color_calc,
|
2018-04-06 00:05:24 -07:00
|
|
|
sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
|
|
|
|
|
64, &cc_offset);
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
|
|
|
|
|
cc.AlphaTestFormat = ALPHATEST_FLOAT32;
|
2020-12-04 08:19:57 -05:00
|
|
|
cc.AlphaReferenceValueAsFLOAT32 = cso->alpha_ref_value;
|
2018-01-25 01:36:49 -08:00
|
|
|
cc.BlendConstantColorRed = ice->state.blend_color.color[0];
|
|
|
|
|
cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
|
|
|
|
|
cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
|
|
|
|
|
cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2018-11-07 14:23:27 +10:00
|
|
|
cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
|
|
|
|
|
cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
|
|
|
|
|
#endif
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
|
|
|
|
|
ptr.ColorCalcStatePointer = cc_offset;
|
|
|
|
|
ptr.ColorCalcStatePointerValid = true;
|
2018-01-10 00:19:29 -08:00
|
|
|
}
|
2018-01-20 01:07:41 -08:00
|
|
|
}
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1604061319
|
2019-11-19 15:00:06 -08:00
|
|
|
*
|
|
|
|
|
* 3DSTATE_CONSTANT_* needs to be programmed before BTP_*
|
|
|
|
|
*
|
|
|
|
|
* Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
|
|
|
|
|
* any stage has a dirty binding table.
|
|
|
|
|
*/
|
2021-03-16 10:14:30 -07:00
|
|
|
const bool emit_const_wa = GFX_VER >= 11 &&
|
2020-05-29 16:57:01 -07:00
|
|
|
((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
|
2021-07-03 10:04:47 +10:00
|
|
|
(stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER));
|
2019-11-19 15:00:06 -08:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-09-23 13:25:01 -07:00
|
|
|
uint32_t nobuffer_stages = 0;
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-01-25 21:39:44 -08:00
|
|
|
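/* Upload push constants for each dirty stage.  On Gfx12+ this prefers
 * 3DSTATE_CONSTANT_ALL: stages with no push buffers are batched into a
 * single packet at the end, and stages whose buffers fit the packet's
 * length field are emitted with it individually.
 */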
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
|
2019-11-19 15:00:06 -08:00
|
|
|
!emit_const_wa)
|
2018-02-09 14:21:54 -08:00
|
|
|
continue;
|
|
|
|
|
|
2018-08-18 23:39:48 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
2018-05-29 07:32:43 -07:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
2018-02-09 14:21:54 -08:00
|
|
|
|
2018-06-06 02:16:52 -07:00
|
|
|
if (!shader)
|
2018-01-25 21:39:44 -08:00
|
|
|
continue;
|
|
|
|
|
|
2019-06-14 14:03:28 +02:00
|
|
|
if (shs->sysvals_need_upload)
|
2020-08-11 10:30:42 -05:00
|
|
|
upload_sysvals(ice, stage, NULL);
|
2018-11-08 23:10:46 -08:00
|
|
|
|
2019-09-23 10:15:52 -07:00
|
|
|
struct push_bos push_bos = {};
|
|
|
|
|
setup_constant_buffers(ice, batch, stage, &push_bos);
|
2019-09-23 13:25:01 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-09-23 13:25:01 -07:00
|
|
|
/* If this stage doesn't have any push constants, emit it later in a
|
|
|
|
|
* single CONSTANT_ALL packet with all the other stages.
|
|
|
|
|
*/
|
|
|
|
|
if (push_bos.buffer_count == 0) {
|
|
|
|
|
nobuffer_stages |= 1 << stage;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
|
|
|
|
|
* contains only 5 bits, so we can only use it for buffers smaller than
|
|
|
|
|
* 32 (the read length is counted in 256-bit constant registers).
|
|
|
|
|
*/
|
|
|
|
|
if (push_bos.max_length < 32) {
|
|
|
|
|
emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
2019-09-23 10:15:52 -07:00
|
|
|
emit_push_constant_packets(ice, batch, stage, &push_bos);
|
2018-01-25 21:39:44 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-09-23 13:25:01 -07:00
|
|
|
if (nobuffer_stages)
|
|
|
|
|
emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-06-15 12:33:58 -07:00
|
|
|
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2021-03-29 15:46:12 -07:00
|
|
|
/* Gfx9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
|
2019-09-10 03:08:46 -07:00
|
|
|
* in order to commit constants. TODO: Investigate "Disable Gather
|
|
|
|
|
* at Set Shader" to go back to legacy mode...
|
|
|
|
|
*/
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
|
2021-03-16 10:14:30 -07:00
|
|
|
(GFX_VER == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
|
2020-05-29 16:57:01 -07:00
|
|
|
<< stage)) {
|
2018-06-06 11:59:17 -07:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
|
|
|
|
|
ptr._3DCommandSubOpcode = 38 + stage;
|
|
|
|
|
ptr.PointertoVSBindingTable = binder->bt_offset[stage];
|
2018-01-30 01:40:14 -08:00
|
|
|
}
|
|
|
|
|
}
|
2018-06-15 12:33:58 -07:00
|
|
|
}
|
2018-03-26 14:11:55 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
|
2019-02-15 11:35:28 -08:00
|
|
|
// XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
|
|
|
|
|
// XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
|
|
|
|
|
|
|
|
|
|
/* The PIPE_CONTROL command description says:
|
|
|
|
|
*
|
|
|
|
|
* "Whenever a Binding Table Index (BTI) used by a Render Target
|
|
|
|
|
* Message points to a different RENDER_SURFACE_STATE, SW must issue a
|
|
|
|
|
* Render Target Cache Flush by enabling this bit. When render target
|
|
|
|
|
* flush is set due to new association of BTI, PS Scoreboard Stall bit
|
|
|
|
|
* must be set in this packet."
|
|
|
|
|
*/
|
|
|
|
|
// XXX: does this need to happen at 3DSTATE_BTP_PS time?
|
|
|
|
|
iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
|
|
|
|
|
PIPE_CONTROL_RENDER_TARGET_FLUSH |
|
|
|
|
|
PIPE_CONTROL_STALL_AT_SCOREBOARD);
|
|
|
|
|
}
|
|
|
|
|
|
2021-11-23 00:43:36 +02:00
|
|
|
if (dirty & IRIS_DIRTY_RENDER_BUFFER)
|
|
|
|
|
trace_framebuffer_state(&batch->trace, batch, &ice->state.framebuffer);
|
|
|
|
|
|
2018-06-15 12:33:58 -07:00
|
|
|
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
|
2018-10-19 01:14:38 -07:00
|
|
|
iris_populate_binding_table(ice, batch, stage, false);
|
2018-03-26 14:11:55 -07:00
|
|
|
}
|
2018-01-30 01:40:14 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
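/* Re-upload the SAMPLER_STATE tables for any stage with dirty samplers
 * and point 3DSTATE_SAMPLER_STATE_POINTERS_XS (subopcode 43 + stage)
 * at the new table.
 */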
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
|
2018-01-30 01:44:07 -08:00
|
|
|
!ice->shaders.prog[stage])
|
2018-01-25 01:36:49 -08:00
|
|
|
continue;
|
2018-01-22 23:39:38 -08:00
|
|
|
|
2018-12-04 15:34:30 -08:00
|
|
|
iris_upload_sampler_states(ice, stage);
|
|
|
|
|
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
|
|
|
|
struct pipe_resource *res = shs->sampler_table.res;
|
2018-06-13 03:06:50 -07:00
|
|
|
if (res)
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-06-13 03:06:50 -07:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
|
|
|
|
|
ptr._3DCommandSubOpcode = 43 + stage;
|
2018-08-18 23:43:14 -07:00
|
|
|
ptr.PointertoVSSamplerState = shs->sampler_table.offset;
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-04 15:34:30 -08:00
|
|
|
if (ice->state.need_border_colors)
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-12-04 15:34:30 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_MULTISAMPLE) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
|
|
|
|
|
ms.PixelLocation =
|
|
|
|
|
ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
|
|
|
|
|
if (ice->state.framebuffer.samples > 0)
|
|
|
|
|
ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
|
2019-01-15 23:22:48 -08:00
|
|
|
ms.SampleMask = ice->state.sample_mask;
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
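/* Emit the pre-packed derived 3DSTATE_XS packet for each dirty stage.
 * The fragment shader needs extra runtime merging (dispatch enables,
 * kernel start pointers, and scratch space); other stages with scratch
 * but no dynamic fields just get the scratch address patched in.
 */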
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
|
2020-05-29 16:57:01 -07:00
|
|
|
if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
|
2018-01-25 01:36:49 -08:00
|
|
|
continue;
|
|
|
|
|
|
2018-01-30 02:16:34 -08:00
|
|
|
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
|
|
|
|
|
|
|
|
|
|
if (shader) {
|
2019-04-11 11:51:26 -07:00
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
2018-06-28 00:57:49 -07:00
|
|
|
struct iris_resource *cache = (void *) shader->assembly.res;
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);
|
2019-04-11 11:51:26 -07:00
|
|
|
|
2021-02-12 11:39:45 -08:00
|
|
|
uint32_t scratch_addr =
|
|
|
|
|
pin_scratch_space(ice, batch, prog_data, stage);
|
2019-08-03 11:37:34 -05:00
|
|
|
|
|
|
|
|
if (stage == MESA_SHADER_FRAGMENT) {
|
|
|
|
|
UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
|
2019-07-07 17:14:15 -07:00
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
|
|
|
|
|
2019-08-03 11:37:34 -05:00
|
|
|
uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
|
2021-02-03 01:41:42 -08:00
|
|
|
_iris_pack_command(batch, GENX(3DSTATE_PS), ps_state, ps) {
|
2019-08-03 11:37:34 -05:00
|
|
|
ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
|
|
|
|
|
ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
|
|
|
|
|
ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
|
|
|
|
|
|
|
|
|
|
/* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
|
|
|
|
|
*
|
|
|
|
|
* "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
|
|
|
|
|
* SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
|
|
|
|
|
* mode."
|
|
|
|
|
*
|
2021-03-29 15:46:12 -07:00
|
|
|
* 16x MSAA only exists on Gfx9+, so we can skip this on Gfx8.
|
2019-08-03 11:37:34 -05:00
|
|
|
*/
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER >= 9 && cso_fb->samples == 16 &&
|
2019-08-03 11:37:34 -05:00
|
|
|
!wm_prog_data->persample_dispatch) {
|
|
|
|
|
assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
|
|
|
|
|
ps._32PixelDispatchEnable = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ps.DispatchGRFStartRegisterForConstantSetupData0 =
|
|
|
|
|
brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
|
|
|
|
|
ps.DispatchGRFStartRegisterForConstantSetupData1 =
|
|
|
|
|
brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
|
|
|
|
|
ps.DispatchGRFStartRegisterForConstantSetupData2 =
|
|
|
|
|
brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
|
|
|
|
|
|
|
|
|
|
ps.KernelStartPointer0 = KSP(shader) +
|
|
|
|
|
brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
|
|
|
|
|
ps.KernelStartPointer1 = KSP(shader) +
|
|
|
|
|
brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
|
|
|
|
|
ps.KernelStartPointer2 = KSP(shader) +
|
|
|
|
|
brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
|
2021-02-03 01:41:42 -08:00
|
|
|
|
2020-10-20 14:27:58 -05:00
|
|
|
#if GFX_VERx10 >= 125
|
|
|
|
|
ps.ScratchSpaceBuffer = scratch_addr >> 4;
|
|
|
|
|
#else
|
2021-02-12 11:39:45 -08:00
|
|
|
ps.ScratchSpaceBasePointer =
|
|
|
|
|
rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
|
2020-10-20 14:27:58 -05:00
|
|
|
#endif
|
2019-07-07 17:14:15 -07:00
|
|
|
}
|
2019-04-18 13:21:56 -04:00
|
|
|
|
2019-08-03 11:37:34 -05:00
|
|
|
uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9
|
2019-08-16 17:48:53 -05:00
|
|
|
if (!wm_prog_data->uses_sample_mask)
|
|
|
|
|
psx.InputCoverageMaskState = ICMS_NONE;
|
|
|
|
|
else if (wm_prog_data->post_depth_coverage)
|
2019-04-18 13:21:56 -04:00
|
|
|
psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
|
2019-08-03 11:37:34 -05:00
|
|
|
else if (wm_prog_data->inner_coverage &&
|
|
|
|
|
cso->conservative_rasterization)
|
2019-04-18 13:21:56 -04:00
|
|
|
psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
|
|
|
|
|
else
|
|
|
|
|
psx.InputCoverageMaskState = ICMS_NORMAL;
|
2019-08-03 11:37:34 -05:00
|
|
|
#else
|
|
|
|
|
psx.PixelShaderUsesInputCoverageMask =
|
|
|
|
|
wm_prog_data->uses_sample_mask;
|
|
|
|
|
#endif
|
2019-04-18 13:21:56 -04:00
|
|
|
}
|
2019-04-11 11:51:26 -07:00
|
|
|
|
2019-08-03 11:37:34 -05:00
|
|
|
uint32_t *shader_ps = (uint32_t *) shader->derived_data;
|
|
|
|
|
uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
|
2019-07-07 17:14:15 -07:00
|
|
|
iris_emit_merge(batch, shader_ps, ps_state,
|
|
|
|
|
GENX(3DSTATE_PS_length));
|
2019-08-03 11:37:34 -05:00
|
|
|
iris_emit_merge(batch, shader_psx, psx_state,
|
2019-04-18 13:21:56 -04:00
|
|
|
GENX(3DSTATE_PS_EXTRA_length));
|
2021-02-12 11:39:45 -08:00
|
|
|
} else if (scratch_addr) {
|
2021-02-03 01:41:42 -08:00
|
|
|
uint32_t *pkt = (uint32_t *) shader->derived_data;
|
|
|
|
|
switch (stage) {
|
|
|
|
|
case MESA_SHADER_VERTEX: MERGE_SCRATCH_ADDR(3DSTATE_VS); break;
|
|
|
|
|
case MESA_SHADER_TESS_CTRL: MERGE_SCRATCH_ADDR(3DSTATE_HS); break;
|
|
|
|
|
case MESA_SHADER_TESS_EVAL: MERGE_SCRATCH_ADDR(3DSTATE_DS); break;
|
|
|
|
|
case MESA_SHADER_GEOMETRY: MERGE_SCRATCH_ADDR(3DSTATE_GS); break;
|
|
|
|
|
}
|
2019-08-03 11:37:34 -05:00
|
|
|
} else {
|
2019-04-18 13:21:56 -04:00
|
|
|
iris_batch_emit(batch, shader->derived_data,
|
|
|
|
|
iris_derived_program_state_size(stage));
|
2019-08-03 11:37:34 -05:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
} else {
|
|
|
|
|
if (stage == MESA_SHADER_TESS_EVAL) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
|
|
|
|
|
} else if (stage == MESA_SHADER_GEOMETRY) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
|
2018-01-22 23:39:38 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-18 09:02:35 -07:00
|
|
|
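/* Re-emit the pre-packed 3DSTATE_SO_BUFFER packets for active stream
 * output targets, pinning their buffers and handling the first-use
 * case where the stream offset must be written out as zero.
 */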
if (ice->state.streamout_active) {
|
|
|
|
|
if (dirty & IRIS_DIRTY_SO_BUFFERS) {
|
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
struct iris_stream_output_target *tgt =
|
|
|
|
|
(void *) ice->state.so_target[i];
|
2021-02-02 17:02:05 -08:00
|
|
|
const uint32_t dwords = GENX(3DSTATE_SO_BUFFER_length);
|
|
|
|
|
uint32_t *so_buffers = genx->so_buffers + i * dwords;
|
|
|
|
|
bool zero_offset = false;
|
|
|
|
|
|
2018-07-18 09:02:35 -07:00
|
|
|
if (tgt) {
|
2021-02-02 17:02:05 -08:00
|
|
|
zero_offset = tgt->zero_offset;
|
2018-07-18 09:02:35 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
|
2020-05-29 16:38:43 -07:00
|
|
|
true, IRIS_DOMAIN_OTHER_WRITE);
|
2018-07-18 09:02:35 -07:00
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
|
2020-05-29 16:38:43 -07:00
|
|
|
true, IRIS_DOMAIN_OTHER_WRITE);
|
2018-07-18 09:02:35 -07:00
|
|
|
}
|
2021-02-02 17:02:05 -08:00
|
|
|
|
|
|
|
|
if (zero_offset) {
|
|
|
|
|
/* Skip the last DWord which contains "Stream Offset" of
|
|
|
|
|
* 0xFFFFFFFF and instead emit a dword of zero directly.
|
|
|
|
|
*/
|
|
|
|
|
STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_StreamOffset_start) ==
|
|
|
|
|
32 * (dwords - 1));
|
|
|
|
|
const uint32_t zero = 0;
|
|
|
|
|
iris_batch_emit(batch, so_buffers, 4 * (dwords - 1));
|
|
|
|
|
iris_batch_emit(batch, &zero, sizeof(zero));
|
|
|
|
|
tgt->zero_offset = false;
|
|
|
|
|
} else {
|
|
|
|
|
iris_batch_emit(batch, so_buffers, 4 * dwords);
|
|
|
|
|
}
|
2018-07-11 17:05:10 -07:00
|
|
|
}
|
|
|
|
|
}
|
2018-06-29 12:58:31 -07:00
|
|
|
|
2018-07-18 09:02:35 -07:00
|
|
|
if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
|
2021-05-03 17:39:28 -07:00
|
|
|
/* Wa_16011773973:
|
|
|
|
|
* If SOL is enabled and SO_DECL state has to be programmed,
|
|
|
|
|
* 1. Send 3D State SOL state with SOL disabled
|
|
|
|
|
* 2. Send SO_DECL NP state
|
|
|
|
|
* 3. Send 3D State SOL with SOL Enabled
|
|
|
|
|
*/
|
|
|
|
|
if (intel_device_info_is_dg2(&batch->screen->devinfo))
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
|
|
|
|
|
|
2018-07-18 09:02:35 -07:00
|
|
|
uint32_t *decl_list =
|
|
|
|
|
ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
|
|
|
|
|
iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
|
|
|
|
|
}
|
2018-06-29 12:58:31 -07:00
|
|
|
|
2018-07-18 09:02:35 -07:00
|
|
|
if (dirty & IRIS_DIRTY_STREAMOUT) {
|
|
|
|
|
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
2018-07-11 12:45:19 -07:00
|
|
|
|
|
|
|
|
uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
|
|
|
|
|
sol.SOFunctionEnable = true;
|
|
|
|
|
sol.SOStatisticsEnable = true;
|
|
|
|
|
|
2018-09-28 12:07:54 +02:00
|
|
|
sol.RenderingDisable = cso_rast->rasterizer_discard &&
|
|
|
|
|
!ice->state.prims_generated_query_active;
|
2018-07-11 12:45:19 -07:00
|
|
|
sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
assert(ice->state.streamout);
|
|
|
|
|
|
|
|
|
|
iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
|
|
|
|
|
GENX(3DSTATE_STREAMOUT_length));
|
|
|
|
|
}
|
2018-07-18 09:02:35 -07:00
|
|
|
} else {
|
|
|
|
|
if (dirty & IRIS_DIRTY_STREAMOUT) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
|
|
|
|
|
}
|
2018-07-11 12:45:19 -07:00
|
|
|
}
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
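/* Merge the rasterizer CSO's 3DSTATE_CLIP packet with the dynamic
 * fields: clip mode (reject-all for rasterizer discard, accept-all for
 * window-space positions), viewport XY clip testing, and the maximum
 * viewport index.
 */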
if (dirty & IRIS_DIRTY_CLIP) {
|
|
|
|
|
struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
|
|
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2019-04-28 23:25:10 -07:00
|
|
|
bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_TESS_EVAL];
|
|
|
|
|
bool points_or_lines = cso_rast->fill_mode_point_or_line ||
|
|
|
|
|
(gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
|
|
|
|
|
: ice->state.prim_is_points_or_lines);
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
|
2018-12-02 14:39:29 -08:00
|
|
|
cl.StatisticsEnable = ice->state.statistics_counters_enabled;
|
2019-07-25 13:09:08 +03:00
|
|
|
if (cso_rast->rasterizer_discard)
|
|
|
|
|
cl.ClipMode = CLIPMODE_REJECT_ALL;
|
|
|
|
|
else if (ice->state.window_space_position)
|
|
|
|
|
cl.ClipMode = CLIPMODE_ACCEPT_ALL;
|
|
|
|
|
else
|
|
|
|
|
cl.ClipMode = CLIPMODE_NORMAL;
|
|
|
|
|
|
|
|
|
|
cl.PerspectiveDivideDisable = ice->state.window_space_position;
|
2019-04-28 23:25:10 -07:00
|
|
|
cl.ViewportXYClipTestEnable = !points_or_lines;
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (wm_prog_data->barycentric_interp_modes &
|
|
|
|
|
BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
|
|
|
|
|
cl.NonPerspectiveBarycentricEnable = true;
|
2018-01-10 00:19:29 -08:00
|
|
|
|
2019-11-03 23:50:38 -08:00
|
|
|
cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
|
2018-06-20 15:35:10 -07:00
|
|
|
cl.MaximumVPIndex = ice->state.num_viewports - 1;
|
2018-01-10 00:19:29 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
|
|
|
|
|
ARRAY_SIZE(cso_rast->clip));
|
2018-01-10 00:19:29 -08:00
|
|
|
}
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2020-01-17 14:41:50 -06:00
|
|
|
if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
|
2018-01-25 01:36:49 -08:00
|
|
|
struct iris_rasterizer_state *cso = ice->state.cso_rast;
|
|
|
|
|
iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2019-07-25 13:09:08 +03:00
|
|
|
uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
|
|
|
|
|
sf.ViewportTransformEnable = !ice->state.window_space_position;
|
2020-01-17 14:41:50 -06:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2020-01-17 14:41:50 -06:00
|
|
|
sf.DerefBlockSize = ice->state.urb_deref_block_size;
|
|
|
|
|
#endif
|
2019-07-25 13:09:08 +03:00
|
|
|
}
|
|
|
|
|
iris_emit_merge(batch, cso->sf, dynamic_sf,
|
|
|
|
|
ARRAY_SIZE(dynamic_sf));
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2018-06-09 00:01:09 -07:00
|
|
|
if (dirty & IRIS_DIRTY_WM) {
|
2018-01-25 01:36:49 -08:00
|
|
|
struct iris_rasterizer_state *cso = ice->state.cso_rast;
|
|
|
|
|
uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
|
2018-11-08 01:14:27 -08:00
|
|
|
wm.StatisticsEnable = ice->state.statistics_counters_enabled;
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
wm.BarycentricInterpolationMode =
|
|
|
|
|
wm_prog_data->barycentric_interp_modes;
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (wm_prog_data->early_fragment_tests)
|
|
|
|
|
wm.EarlyDepthStencilControl = EDSC_PREPS;
|
|
|
|
|
else if (wm_prog_data->has_side_effects)
|
|
|
|
|
wm.EarlyDepthStencilControl = EDSC_PSEXEC;
|
2019-02-11 14:22:50 -08:00
|
|
|
|
|
|
|
|
/* We could skip this bit if color writes are enabled. */
|
|
|
|
|
if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
|
|
|
|
|
wm.ForceThreadDispatchEnable = ForceON;
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
|
|
|
|
iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
|
|
|
|
|
}
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2018-08-18 01:24:38 -07:00
|
|
|
if (dirty & IRIS_DIRTY_SBE) {
|
2018-04-19 19:04:17 -07:00
|
|
|
iris_emit_sbe(batch, ice);
|
2018-01-29 15:06:04 -08:00
|
|
|
}
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_PS_BLEND) {
|
2018-01-25 21:58:31 -08:00
|
|
|
struct iris_blend_state *cso_blend = ice->state.cso_blend;
|
|
|
|
|
struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
|
2019-02-11 12:07:51 -08:00
|
|
|
const struct shader_info *fs_info =
|
|
|
|
|
iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
|
|
|
|
|
|
2018-01-25 21:58:31 -08:00
|
|
|
uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
|
2019-02-11 12:07:51 -08:00
|
|
|
pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
|
2020-12-04 08:19:57 -05:00
|
|
|
pb.AlphaTestEnable = cso_zsa->alpha_enabled;
|
2019-05-02 21:14:49 -07:00
|
|
|
|
|
|
|
|
/* The dual source blending docs caution against using SRC1 factors
|
|
|
|
|
* when the shader doesn't use a dual source render target write.
|
|
|
|
|
* Empirically, this can lead to GPU hangs, and the results are
|
|
|
|
|
* undefined anyway, so simply disable blending to avoid the hang.
|
|
|
|
|
*/
|
|
|
|
|
pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
|
|
|
|
|
(!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
|
2018-01-25 21:58:31 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
|
|
|
|
|
ARRAY_SIZE(cso_blend->ps_blend));
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2017-12-27 02:54:26 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
|
|
|
|
|
struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 9 && GFX_VER < 12
|
2018-01-25 01:36:49 -08:00
|
|
|
struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
|
|
|
|
|
uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
|
|
|
|
|
wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
|
|
|
|
|
wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
|
|
|
|
|
}
|
|
|
|
|
iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
|
2018-11-07 14:23:27 +10:00
|
|
|
#else
|
2020-02-04 10:49:59 -08:00
|
|
|
/* Use modify disable fields which allow us to emit packets
|
|
|
|
|
* directly instead of merging them later.
|
|
|
|
|
*/
|
2018-11-07 14:23:27 +10:00
|
|
|
iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
|
|
|
|
|
#endif
|
2019-10-23 20:56:45 +01:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-10-23 20:56:45 +01:00
|
|
|
iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
|
|
|
|
|
#endif
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2020-02-04 10:49:59 -08:00
|
|
|
if (dirty & IRIS_DIRTY_STENCIL_REF) {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2020-02-04 10:49:59 -08:00
|
|
|
/* Use modify disable fields which allow us to emit packets
|
|
|
|
|
* directly instead of merging them later.
|
|
|
|
|
*/
|
|
|
|
|
struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
|
|
|
|
|
uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
|
|
|
|
|
wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
|
|
|
|
|
wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
|
|
|
|
|
wmds.StencilTestMaskModifyDisable = true;
|
|
|
|
|
wmds.StencilWriteMaskModifyDisable = true;
|
|
|
|
|
wmds.StencilStateModifyDisable = true;
|
|
|
|
|
wmds.DepthStateModifyDisable = true;
|
|
|
|
|
}
|
|
|
|
|
iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-15 16:22:58 -07:00
|
|
|
if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1409725701:
|
2020-11-05 10:33:44 -08:00
|
|
|
* "The viewport-specific state used by the SF unit (SCISSOR_RECT) is
|
|
|
|
|
* stored as an array of up to 16 elements. The location of first
|
|
|
|
|
* element of the array, as specified by Pointer to SCISSOR_RECT,
|
|
|
|
|
* should be aligned to a 64-byte boundary.
|
|
|
|
|
*/
|
|
|
|
|
uint32_t alignment = 64;
|
2018-06-20 16:07:05 -07:00
|
|
|
uint32_t scissor_offset =
|
2018-06-15 11:55:28 -07:00
|
|
|
emit_state(batch, ice->state.dynamic_uploader,
|
|
|
|
|
&ice->state.last_res.scissor,
|
|
|
|
|
ice->state.scissors,
|
2018-04-06 00:05:24 -07:00
|
|
|
sizeof(struct pipe_scissor_state) *
|
2020-11-05 10:33:44 -08:00
|
|
|
ice->state.num_viewports, alignment);
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
|
|
|
|
|
ptr.ScissorRectPointer = scissor_offset;
|
|
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
}
|
|
|
|
|
|
2018-05-08 23:52:07 -07:00
|
|
|
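/* Emit the pre-packed depth/stencil buffer packets, refreshing
 * 3DSTATE_CLEAR_PARAMS from the depth resource's fast-clear value
 * when HiZ is in use.
 */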
if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
|
2018-07-01 22:13:07 -07:00
|
|
|
struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
|
2018-05-08 23:52:07 -07:00
|
|
|
|
2021-06-17 09:39:50 -07:00
|
|
|
/* Do not emit the cso yet. We may need to update clear params first. */
|
|
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
2021-06-17 09:48:20 -07:00
|
|
|
struct iris_resource *zres = NULL, *sres = NULL;
|
2021-06-17 09:39:50 -07:00
|
|
|
if (cso_fb->zsbuf) {
|
|
|
|
|
iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
|
|
|
|
|
&zres, &sres);
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-17 09:48:20 -07:00
|
|
|
if (zres && ice->state.hiz_usage != ISL_AUX_USAGE_NONE) {
|
2021-06-17 10:12:12 -07:00
|
|
|
uint32_t *clear_params =
|
|
|
|
|
cso_z->packets + ARRAY_SIZE(cso_z->packets) -
|
|
|
|
|
GENX(3DSTATE_CLEAR_PARAMS_length);
|
2021-06-17 09:48:20 -07:00
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
|
|
|
|
|
clear.DepthClearValueValid = true;
|
2021-09-07 08:45:05 -07:00
|
|
|
clear.DepthClearValue = zres->aux.clear_color.f32[0];
|
2021-06-17 09:48:20 -07:00
|
|
|
}
|
2021-06-17 09:39:50 -07:00
|
|
|
}
|
|
|
|
|
|
2021-06-17 10:12:12 -07:00
|
|
|
iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
|
2021-06-15 10:38:38 -07:00
|
|
|
|
|
|
|
|
if (zres)
|
|
|
|
|
genX(emit_depth_state_workarounds)(ice, batch, &zres->surf);
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER >= 12) {
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1408224581
|
2019-11-13 14:30:57 -08:00
|
|
|
*
|
2021-03-29 15:46:12 -07:00
|
|
|
* Workaround (Gfx12LP A-step only): An additional pipe control with
|
2019-11-13 14:30:57 -08:00
|
|
|
* post-sync = store dword operation would be required. (The w/a is to
|
|
|
|
|
* have an additional pipe control after the stencil state whenever
|
|
|
|
|
* the surface state bits of this state are changing).
|
2021-06-18 19:40:10 -07:00
|
|
|
*
|
|
|
|
|
* This also seems sufficient to handle Wa_14014148106.
|
2019-11-13 14:30:57 -08:00
|
|
|
*/
|
|
|
|
|
iris_emit_pipe_control_write(batch, "WA for stencil state",
|
|
|
|
|
PIPE_CONTROL_WRITE_IMMEDIATE,
|
2020-02-21 18:06:18 +02:00
|
|
|
batch->screen->workaround_address.bo,
|
|
|
|
|
batch->screen->workaround_address.offset, 0);
|
2019-11-13 14:30:57 -08:00
|
|
|
}
|
2019-03-09 01:02:06 -08:00
|
|
|
}
|
2018-05-08 23:52:07 -07:00
|
|
|
|
2019-03-09 01:02:06 -08:00
|
|
|
if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
|
|
|
|
|
/* Listen for buffer changes, and also write enable changes. */
|
|
|
|
|
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
|
|
|
|
|
pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
|
2018-05-08 23:52:07 -07:00
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
|
|
|
|
|
for (int i = 0; i < 32; i++) {
|
|
|
|
|
poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
|
|
|
|
|
struct iris_rasterizer_state *cso = ice->state.cso_rast;
|
|
|
|
|
iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
|
|
|
|
|
}
|
2018-01-22 23:25:18 -08:00
|
|
|
|
2018-08-31 18:03:19 -07:00
|
|
|
if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
|
|
|
|
|
topo.PrimitiveTopologyType =
|
2021-08-13 02:29:56 -04:00
|
|
|
translate_prim_type(draw->mode, ice->state.vertices_per_patch);
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-22 23:25:18 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
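/* Gather VERTEX_BUFFER_STATE for every bound vertex buffer, append
 * buffers for (derived) draw parameters if the VS needs them, then
 * emit everything as a single 3DSTATE_VERTEX_BUFFERS packet.
 */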
if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
|
2018-12-04 16:38:14 -08:00
|
|
|
int count = util_bitcount64(ice->state.bound_vertex_buffers);
|
2020-04-28 14:04:58 -07:00
|
|
|
uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
|
2019-02-26 14:37:23 +01:00
|
|
|
|
|
|
|
|
if (ice->state.vs_uses_draw_params) {
|
2019-09-18 20:12:33 -07:00
|
|
|
assert(ice->draw.draw_params.res);
|
2019-02-26 14:37:23 +01:00
|
|
|
|
|
|
|
|
struct iris_vertex_buffer_state *state =
|
|
|
|
|
&(ice->state.genx->vertex_buffers[count]);
|
2019-09-18 20:12:33 -07:00
|
|
|
pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
|
2019-02-26 14:37:23 +01:00
|
|
|
struct iris_resource *res = (void *) state->resource;
|
|
|
|
|
|
|
|
|
|
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
|
|
|
|
|
vb.VertexBufferIndex = count;
|
|
|
|
|
vb.AddressModifyEnable = true;
|
|
|
|
|
vb.BufferPitch = 0;
|
2019-09-18 20:12:33 -07:00
|
|
|
vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
|
2019-02-26 14:37:23 +01:00
|
|
|
vb.BufferStartingAddress =
|
2021-07-19 21:23:18 -07:00
|
|
|
ro_bo(NULL, res->bo->address +
|
2019-09-18 20:12:33 -07:00
|
|
|
(int) ice->draw.draw_params.offset);
|
2020-10-07 07:44:56 -07:00
|
|
|
vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
|
|
|
|
|
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
|
2021-04-20 16:06:13 -07:00
|
|
|
#if GFX_VER >= 12
|
2021-04-12 10:11:40 -07:00
|
|
|
vb.L3BypassDisable = true;
|
|
|
|
|
#endif
|
2019-02-26 14:37:23 +01:00
|
|
|
}
|
|
|
|
|
dynamic_bound |= 1ull << count;
|
|
|
|
|
count++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ice->state.vs_uses_derived_draw_params) {
|
|
|
|
|
struct iris_vertex_buffer_state *state =
|
|
|
|
|
&(ice->state.genx->vertex_buffers[count]);
|
|
|
|
|
pipe_resource_reference(&state->resource,
|
2019-09-18 20:12:33 -07:00
|
|
|
ice->draw.derived_draw_params.res);
|
|
|
|
|
struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
|
2019-02-26 14:37:23 +01:00
|
|
|
|
|
|
|
|
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
|
|
|
|
|
vb.VertexBufferIndex = count;
|
|
|
|
|
vb.AddressModifyEnable = true;
|
|
|
|
|
vb.BufferPitch = 0;
|
|
|
|
|
vb.BufferSize =
|
2019-09-18 20:12:33 -07:00
|
|
|
res->bo->size - ice->draw.derived_draw_params.offset;
|
2019-02-26 14:37:23 +01:00
|
|
|
vb.BufferStartingAddress =
|
2021-07-19 21:23:18 -07:00
|
|
|
ro_bo(NULL, res->bo->address +
|
2019-09-18 20:12:33 -07:00
|
|
|
(int) ice->draw.derived_draw_params.offset);
|
2020-10-07 07:44:56 -07:00
|
|
|
vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
|
|
|
|
|
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
|
2021-04-20 16:06:13 -07:00
|
|
|
#if GFX_VER >= 12
|
2021-04-12 10:11:40 -07:00
|
|
|
vb.L3BypassDisable = true;
|
|
|
|
|
#endif
|
2019-02-26 14:37:23 +01:00
|
|
|
}
|
|
|
|
|
dynamic_bound |= 1ull << count;
|
|
|
|
|
count++;
|
|
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2018-12-04 16:38:14 -08:00
|
|
|
if (count) {
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 11
|
2021-03-29 15:46:12 -07:00
|
|
|
/* Gfx11+ doesn't need the cache workaround below */
|
2019-11-25 10:04:38 -08:00
|
|
|
uint64_t bound = dynamic_bound;
|
|
|
|
|
while (bound) {
|
|
|
|
|
const int i = u_bit_scan64(&bound);
|
|
|
|
|
iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
|
2020-02-05 19:42:04 -08:00
|
|
|
false, IRIS_DOMAIN_VF_READ);
|
2019-11-25 10:04:38 -08:00
|
|
|
}
|
|
|
|
|
#else
|
2018-11-21 00:06:46 -08:00
|
|
|
/* The VF cache designers cut corners, and made the cache key's
|
|
|
|
|
* <VertexBufferIndex, Memory Address> tuple only consider the bottom
|
|
|
|
|
* 32 bits of the address. If you have two vertex buffers which get
|
|
|
|
|
* placed exactly 4 GiB apart and use them in back-to-back draw calls,
|
|
|
|
|
* you can get collisions (even within a single batch).
|
|
|
|
|
*
|
|
|
|
|
* So, we need to do a VF cache invalidate if the buffer for a VB
|
|
|
|
|
* slot changes [48:32] address bits from the previous time.
|
|
|
|
|
*/
|
2018-12-02 14:16:08 -08:00
|
|
|
unsigned flush_flags = 0;
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2019-02-26 14:37:23 +01:00
|
|
|
uint64_t bound = dynamic_bound;
|
2018-12-04 16:38:14 -08:00
|
|
|
while (bound) {
|
|
|
|
|
const int i = u_bit_scan64(&bound);
|
2018-11-21 00:06:46 -08:00
|
|
|
uint16_t high_bits = 0;
|
|
|
|
|
|
2018-12-04 16:38:14 -08:00
|
|
|
struct iris_resource *res =
|
|
|
|
|
(void *) genx->vertex_buffers[i].resource;
|
2018-11-21 00:06:46 -08:00
|
|
|
if (res) {
|
2020-02-05 19:42:04 -08:00
|
|
|
iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_VF_READ);
|
2018-11-21 00:06:46 -08:00
|
|
|
|
2021-07-19 21:23:18 -07:00
|
|
|
high_bits = res->bo->address >> 32ull;
|
2018-11-21 00:06:46 -08:00
|
|
|
if (high_bits != ice->state.last_vbo_high_bits[i]) {
|
2019-01-17 23:44:09 -08:00
|
|
|
flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
|
|
|
|
|
PIPE_CONTROL_CS_STALL;
|
2018-11-21 00:06:46 -08:00
|
|
|
ice->state.last_vbo_high_bits[i] = high_bits;
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-06-26 13:25:22 -07:00
|
|
|
}
|
2018-11-21 00:06:46 -08:00
|
|
|
|
2019-06-19 16:04:50 -05:00
|
|
|
if (flush_flags) {
|
|
|
|
|
iris_emit_pipe_control_flush(batch,
|
|
|
|
|
"workaround: VF cache 32-bit key [VB]",
|
|
|
|
|
flush_flags);
|
|
|
|
|
}
|
2019-11-25 10:04:38 -08:00
|
|
|
#endif
|
2018-11-21 00:06:46 -08:00
|
|
|
|
2018-12-04 16:38:14 -08:00
|
|
|
const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
|
|
|
|
|
|
|
|
|
|
uint32_t *map =
|
|
|
|
|
iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
|
|
|
|
|
_iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
|
|
|
|
|
vb.DWordLength = (vb_dwords * count + 1) - 2;
|
|
|
|
|
}
|
|
|
|
|
map += 1;
|
|
|
|
|
|
2019-02-26 14:37:23 +01:00
|
|
|
bound = dynamic_bound;
|
2018-12-04 16:38:14 -08:00
|
|
|
while (bound) {
|
|
|
|
|
const int i = u_bit_scan64(&bound);
|
|
|
|
|
memcpy(map, genx->vertex_buffers[i].state,
|
|
|
|
|
sizeof(uint32_t) * vb_dwords);
|
|
|
|
|
map += vb_dwords;
|
|
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
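/* Usually the pre-packed 3DSTATE_VERTEX_ELEMENTS from the CSO can be
 * emitted directly.  If the VS needs an SGVS element, derived draw
 * parameters, or an edge flag, build a dynamic copy with the extra
 * elements appended instead.
 */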
if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
|
|
|
|
|
struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
|
2018-07-18 16:27:07 -07:00
|
|
|
const unsigned entries = MAX2(cso->count, 1);
|
2019-02-26 14:37:23 +01:00
|
|
|
if (!(ice->state.vs_needs_sgvs_element ||
|
2019-02-27 20:44:27 +01:00
|
|
|
ice->state.vs_uses_derived_draw_params ||
|
|
|
|
|
ice->state.vs_needs_edge_flag)) {
|
2019-02-26 14:37:23 +01:00
|
|
|
iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
|
|
|
|
|
(1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
|
|
|
|
|
} else {
|
|
|
|
|
uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
|
2019-02-27 20:44:27 +01:00
|
|
|
const unsigned dyn_count = cso->count +
|
2019-02-26 14:37:23 +01:00
|
|
|
ice->state.vs_needs_sgvs_element +
|
|
|
|
|
ice->state.vs_uses_derived_draw_params;
|
|
|
|
|
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
|
|
|
|
|
&dynamic_ves, ve) {
|
|
|
|
|
ve.DWordLength =
|
|
|
|
|
1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
|
|
|
|
|
}
|
2019-02-27 20:44:27 +01:00
|
|
|
memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
|
|
|
|
|
(cso->count - ice->state.vs_needs_edge_flag) *
|
2019-02-26 14:37:23 +01:00
|
|
|
GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
|
|
|
|
|
uint32_t *ve_pack_dest =
|
2019-02-27 20:44:27 +01:00
|
|
|
&dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
|
|
|
|
|
GENX(VERTEX_ELEMENT_STATE_length)];
|
2019-02-26 14:37:23 +01:00
|
|
|
|
|
|
|
|
if (ice->state.vs_needs_sgvs_element) {
|
|
|
|
|
uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
|
|
|
|
|
VFCOMP_STORE_SRC : VFCOMP_STORE_0;
|
|
|
|
|
iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
|
|
|
|
|
ve.Valid = true;
|
|
|
|
|
ve.VertexBufferIndex =
|
|
|
|
|
util_bitcount64(ice->state.bound_vertex_buffers);
|
|
|
|
|
ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
|
|
|
|
|
ve.Component0Control = base_ctrl;
|
|
|
|
|
ve.Component1Control = base_ctrl;
|
|
|
|
|
ve.Component2Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component3Control = VFCOMP_STORE_0;
|
|
|
|
|
}
|
|
|
|
|
ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
|
|
|
|
|
}
|
|
|
|
|
if (ice->state.vs_uses_derived_draw_params) {
|
|
|
|
|
iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
|
|
|
|
|
ve.Valid = true;
|
|
|
|
|
ve.VertexBufferIndex =
|
|
|
|
|
util_bitcount64(ice->state.bound_vertex_buffers) +
|
|
|
|
|
ice->state.vs_uses_draw_params;
|
|
|
|
|
ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
|
|
|
|
|
ve.Component0Control = VFCOMP_STORE_SRC;
|
|
|
|
|
ve.Component1Control = VFCOMP_STORE_SRC;
|
|
|
|
|
ve.Component2Control = VFCOMP_STORE_0;
|
|
|
|
|
ve.Component3Control = VFCOMP_STORE_0;
|
|
|
|
|
}
|
|
|
|
|
ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
|
|
|
|
|
}
|
2019-02-27 20:44:27 +01:00
|
|
|
if (ice->state.vs_needs_edge_flag) {
|
|
|
|
|
for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
|
|
|
|
|
ve_pack_dest[i] = cso->edgeflag_ve[i];
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-26 14:37:23 +01:00
|
|
|
iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
|
|
|
|
|
(1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-27 20:44:27 +01:00
|
|
|
if (!ice->state.vs_needs_edge_flag) {
|
|
|
|
|
iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
|
|
|
|
|
entries * GENX(3DSTATE_VF_INSTANCING_length));
|
|
|
|
|
} else {
|
|
|
|
|
assert(cso->count > 0);
|
|
|
|
|
const unsigned edgeflag_index = cso->count - 1;
|
|
|
|
|
uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
|
|
|
|
|
memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
|
|
|
|
|
GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
|
|
|
|
|
|
|
|
|
|
uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
|
|
|
|
|
edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
|
|
|
|
|
vi.VertexElementIndex = edgeflag_index +
|
|
|
|
|
ice->state.vs_needs_sgvs_element +
|
|
|
|
|
ice->state.vs_uses_derived_draw_params;
|
|
|
|
|
}
|
|
|
|
|
for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
|
|
|
|
|
vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
|
|
|
|
|
|
|
|
|
|
iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
|
|
|
|
|
entries * GENX(3DSTATE_VF_INSTANCING_length));
|
|
|
|
|
}
|
2018-07-18 09:23:24 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
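/* Point the generated VertexID / InstanceID at components 2 and 3 of
 * the SGVS vertex element appended after the application's elements.
 */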
if (dirty & IRIS_DIRTY_VF_SGVS) {
|
|
|
|
|
const struct brw_vs_prog_data *vs_prog_data = (void *)
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
|
|
|
|
|
struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
|
|
|
|
|
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
|
|
|
|
|
if (vs_prog_data->uses_vertexid) {
|
|
|
|
|
sgv.VertexIDEnable = true;
|
|
|
|
|
sgv.VertexIDComponentNumber = 2;
|
2019-02-27 20:44:27 +01:00
|
|
|
sgv.VertexIDElementOffset =
|
|
|
|
|
cso->count - ice->state.vs_needs_edge_flag;
|
2018-07-18 09:23:24 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (vs_prog_data->uses_instanceid) {
|
|
|
|
|
sgv.InstanceIDEnable = true;
|
|
|
|
|
sgv.InstanceIDComponentNumber = 3;
|
2019-02-27 20:44:27 +01:00
|
|
|
sgv.InstanceIDElementOffset =
|
|
|
|
|
cso->count - ice->state.vs_needs_edge_flag;
|
2018-07-18 09:23:24 -07:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
|
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2018-08-31 18:03:19 -07:00
|
|
|
if (dirty & IRIS_DIRTY_VF) {
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
|
2020-06-03 11:50:38 -07:00
|
|
|
#if GFX_VERx10 >= 125
|
|
|
|
|
vf.GeometryDistributionEnable = true;
|
|
|
|
|
#endif
|
2018-01-25 01:36:49 -08:00
|
|
|
if (draw->primitive_restart) {
|
|
|
|
|
vf.IndexedDrawCutIndexEnable = true;
|
|
|
|
|
vf.CutIndex = draw->restart_index;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2020-06-03 11:50:38 -07:00
|
|
|
#if GFX_VERx10 >= 125
|
|
|
|
|
if (dirty & IRIS_DIRTY_VFG) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VFG), vfg) {
|
|
|
|
|
/* If 3DSTATE_TE: TE Enable == 1 then RR_STRICT else RR_FREE */
|
|
|
|
|
vfg.DistributionMode =
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL ? RR_STRICT :
|
|
|
|
|
RR_FREE;
|
|
|
|
|
vfg.DistributionGranularity = BatchLevelGranularity;
|
2021-09-10 15:17:54 -07:00
|
|
|
/* Wa_14014890652 */
|
|
|
|
|
if (intel_device_info_is_dg2(&batch->screen->devinfo))
|
|
|
|
|
vfg.GranularityThresholdDisable = 1;
|
2020-06-03 11:50:38 -07:00
|
|
|
vfg.ListCutIndexEnable = draw->primitive_restart;
|
|
|
|
|
/* 192 vertices for TRILIST_ADJ */
|
|
|
|
|
vfg.ListNBatchSizeScale = 0;
|
|
|
|
|
/* Batch size of 384 vertices */
|
|
|
|
|
vfg.List3BatchSizeScale = 2;
|
|
|
|
|
/* Batch size of 128 vertices */
|
|
|
|
|
vfg.List2BatchSizeScale = 1;
|
|
|
|
|
/* Batch size of 128 vertices */
|
|
|
|
|
vfg.List1BatchSizeScale = 2;
|
|
|
|
|
/* Batch size of 256 vertices for STRIP topologies */
|
|
|
|
|
vfg.StripBatchSizeScale = 3;
|
|
|
|
|
/* 192 control points for PATCHLIST_3 */
|
|
|
|
|
vfg.PatchBatchSizeScale = 1;
|
|
|
|
|
/* 192 control points for PATCHLIST_3 */
|
|
|
|
|
vfg.PatchBatchSizeMultiplier = 31;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2019-04-12 11:55:38 -07:00
|
|
|
if (dirty & IRIS_DIRTY_VF_STATISTICS) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
|
|
|
|
|
vf.StatisticsEnable = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2019-09-25 00:31:07 -07:00
|
|
|
if (dirty & IRIS_DIRTY_PMA_FIX) {
|
|
|
|
|
bool enable = want_pma_fix(ice);
|
|
|
|
|
genX(update_pma_fix)(ice, batch, enable);
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2019-08-10 12:45:46 -07:00
|
|
|
if (ice->state.current_hash_scale != 1)
|
|
|
|
|
genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
|
2018-04-27 16:39:30 -07:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2020-02-25 15:04:08 -08:00
|
|
|
genX(invalidate_aux_map_state)(batch);
|
2018-04-27 16:39:30 -07:00
|
|
|
#endif
|
2018-09-01 00:58:29 -07:00
|
|
|
}
|
|
|
|
|
|
2020-05-29 16:11:58 -07:00
|
|
|
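/* Emit any buffer barriers needed so the VF unit can read the
 * currently bound vertex buffers.
 */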
static void
|
|
|
|
|
flush_vbos(struct iris_context *ice, struct iris_batch *batch)
|
|
|
|
|
{
|
|
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
|
|
|
|
uint64_t bound = ice->state.bound_vertex_buffers;
|
|
|
|
|
while (bound) {
|
|
|
|
|
const int i = u_bit_scan64(&bound);
|
|
|
|
|
struct iris_bo *bo = iris_resource_bo(genx->vertex_buffers[i].resource);
|
|
|
|
|
iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_VF_READ);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-01 00:58:29 -07:00
|
|
|
static void
|
|
|
|
|
iris_upload_render_state(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
2020-10-30 09:53:50 -04:00
|
|
|
const struct pipe_draw_info *draw,
|
2021-04-11 13:35:38 -04:00
|
|
|
unsigned drawid_offset,
|
2020-11-01 09:04:40 -05:00
|
|
|
const struct pipe_draw_indirect_info *indirect,
|
2021-04-11 09:49:49 -04:00
|
|
|
const struct pipe_draw_start_count_bias *sc)
|
2018-09-01 00:58:29 -07:00
|
|
|
{
|
2019-05-10 00:44:39 +03:00
|
|
|
bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
|
|
|
|
|
|
2021-11-23 00:43:36 +02:00
|
|
|
trace_intel_begin_draw(&batch->trace, batch);
|
|
|
|
|
|
2020-05-29 16:11:58 -07:00
|
|
|
if (ice->state.dirty & IRIS_DIRTY_VERTEX_BUFFER_FLUSHES)
|
|
|
|
|
flush_vbos(ice, batch);
|
|
|
|
|
|
2020-04-23 17:58:48 -07:00
|
|
|
iris_batch_sync_region_start(batch);
|
|
|
|
|
|
2018-09-15 14:35:47 -07:00
|
|
|
/* Always pin the binder. If we're emitting new binding table pointers,
|
|
|
|
|
* we need it. If not, we're probably inheriting old tables via the
|
|
|
|
|
* context, and need it anyway. Since true zero-bindings cases are
|
|
|
|
|
* practically non-existent, just pin it and avoid last_res tracking.
|
|
|
|
|
*/
|
2020-05-29 16:38:43 -07:00
|
|
|
iris_use_pinned_bo(batch, ice->state.binder.bo, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
2018-09-15 14:35:47 -07:00
|
|
|
|
2021-03-29 12:26:41 -07:00
|
|
|
if (!batch->contains_draw) {
|
|
|
|
|
if (GFX_VER == 12) {
|
|
|
|
|
/* Re-emit constants when starting a new batch buffer in order to
|
|
|
|
|
* work around push constant corruption on context switch.
|
|
|
|
|
*
|
|
|
|
|
* XXX - Provide hardware spec quotation when available.
|
|
|
|
|
*/
|
|
|
|
|
ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_CONSTANTS_VS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_TCS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_TES |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_GS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_FS);
|
|
|
|
|
}
|
|
|
|
|
batch->contains_draw = true;
|
|
|
|
|
}
|
|
|
|
|
|
2020-02-06 21:06:17 -08:00
|
|
|
if (!batch->contains_draw_with_next_seqno) {
|
2018-11-21 11:54:37 -08:00
|
|
|
iris_restore_render_saved_bos(ice, batch, draw);
|
2021-03-29 12:26:41 -07:00
|
|
|
batch->contains_draw_with_next_seqno = true;
|
2018-11-21 11:54:37 -08:00
|
|
|
}
|
|
|
|
|
|
2018-09-01 00:58:29 -07:00
|
|
|
iris_upload_dirty_render_state(ice, batch, draw);
|
|
|
|
|
|
|
|
|
|
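/* Set up the index buffer: upload user-pointer indices to a transient
 * buffer, or pin the application's buffer, then emit
 * 3DSTATE_INDEX_BUFFER only if the packet actually changed.
 */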
if (draw->index_size > 0) {
|
|
|
|
|
unsigned offset;
|
|
|
|
|
|
|
|
|
|
if (draw->has_user_indices) {
|
2020-11-28 00:44:19 -05:00
|
|
|
unsigned start_offset = draw->index_size * sc->start;
|
|
|
|
|
|
2020-08-25 15:40:27 -07:00
|
|
|
u_upload_data(ice->ctx.const_uploader, start_offset,
|
2020-11-28 00:44:19 -05:00
|
|
|
sc->count * draw->index_size, 4,
|
|
|
|
|
(char*)draw->index.user + start_offset,
|
2018-09-20 17:27:47 -07:00
|
|
|
&offset, &ice->state.last_res.index_buffer);
|
2020-11-28 00:44:19 -05:00
|
|
|
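/* Only the indices from sc->start onward were uploaded; bias the returned
 * offset back by start_offset so the hardware's StartVertexLocation-based
 * addressing still lands on the uploaded data.
 */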
offset -= start_offset;
|
2018-09-01 00:58:29 -07:00
|
|
|
} else {
|
2018-11-21 00:38:49 -08:00
|
|
|
struct iris_resource *res = (void *) draw->index.resource;
|
|
|
|
|
res->bind_history |= PIPE_BIND_INDEX_BUFFER;
|
|
|
|
|
|
2018-09-20 17:27:47 -07:00
|
|
|
pipe_resource_reference(&ice->state.last_res.index_buffer,
|
|
|
|
|
draw->index.resource);
|
2018-09-01 00:58:29 -07:00
|
|
|
offset = 0;
|
2020-05-05 12:02:07 -07:00
|
|
|
|
|
|
|
|
iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_VF_READ);
|
2018-09-01 00:58:29 -07:00
|
|
|
}
|
|
|
|
|
|
2019-02-16 00:57:54 -08:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
2018-09-20 17:27:47 -07:00
|
|
|
struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
|
|
|
|
|
|
2019-02-16 00:57:54 -08:00
|
|
|
uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
|
|
|
|
|
iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
|
2018-09-01 00:58:29 -07:00
|
|
|
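/* index_size is 1, 2, or 4 bytes; shifting right by one maps that to the
 * INDEX_BYTE/INDEX_WORD/INDEX_DWORD encoding (0/1/2).
 */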
ib.IndexFormat = draw->index_size >> 1;
|
2020-10-07 07:44:56 -07:00
|
|
|
ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev,
|
|
|
|
|
ISL_SURF_USAGE_INDEX_BUFFER_BIT);
|
2019-03-18 00:51:18 -07:00
|
|
|
ib.BufferSize = bo->size - offset;
|
2021-07-19 21:23:18 -07:00
|
|
|
ib.BufferStartingAddress = ro_bo(NULL, bo->address + offset);
|
2021-04-20 16:06:13 -07:00
|
|
|
#if GFX_VER >= 12
|
2021-04-12 10:11:40 -07:00
|
|
|
ib.L3BypassDisable = true;
|
|
|
|
|
#endif
|
2019-02-16 00:57:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
|
|
|
|
|
memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
|
|
|
|
|
iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
|
2020-02-05 19:42:04 -08:00
|
|
|
iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_VF_READ);
|
2018-09-01 00:58:29 -07:00
|
|
|
}
|
2018-11-21 00:06:46 -08:00
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER < 11
|
2018-11-21 00:06:46 -08:00
|
|
|
/* The VF cache key only uses 32-bits, see vertex buffer comment above */
|
2021-07-19 21:23:18 -07:00
|
|
|
uint16_t high_bits = bo->address >> 32ull;
|
2018-11-21 00:06:46 -08:00
|
|
|
if (high_bits != ice->state.last_index_bo_high_bits) {
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_pipe_control_flush(batch,
|
|
|
|
|
"workaround: VF cache 32-bit key [IB]",
|
|
|
|
|
PIPE_CONTROL_VF_CACHE_INVALIDATE |
|
|
|
|
|
PIPE_CONTROL_CS_STALL);
|
2018-11-21 00:06:46 -08:00
|
|
|
ice->state.last_index_bo_high_bits = high_bits;
|
|
|
|
|
}
|
2019-11-25 10:04:38 -08:00
|
|
|
#endif
|
2018-09-01 00:58:29 -07:00
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2018-07-14 22:15:39 -07:00
|
|
|
#define _3DPRIM_END_OFFSET 0x2420
|
|
|
|
|
#define _3DPRIM_START_VERTEX 0x2430
|
|
|
|
|
#define _3DPRIM_VERTEX_COUNT 0x2434
|
|
|
|
|
#define _3DPRIM_INSTANCE_COUNT 0x2438
|
|
|
|
|
#define _3DPRIM_START_INSTANCE 0x243C
|
|
|
|
|
#define _3DPRIM_BASE_VERTEX 0x2440
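/* MMIO registers that 3DPRIMITIVE reads its parameters from when
 * "Indirect Parameter Enable" is set.  Indirect draws are programmed by
 * storing the values from the indirect buffer into these registers with
 * the MI builder below.
 */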
|
|
|
|
|
|
2021-11-04 23:07:26 -07:00
|
|
|
struct mi_builder b;
|
|
|
|
|
mi_builder_init(&b, &batch->screen->devinfo, batch);
|
|
|
|
|
|
2020-10-30 09:53:50 -04:00
|
|
|
if (indirect && !indirect->count_from_stream_output) {
|
|
|
|
|
if (indirect->indirect_draw_count) {
|
2019-05-10 00:44:39 +03:00
|
|
|
use_predicate = true;
|
|
|
|
|
|
|
|
|
|
struct iris_bo *draw_count_bo =
|
2020-10-30 09:53:50 -04:00
|
|
|
iris_resource_bo(indirect->indirect_draw_count);
|
2019-05-10 00:44:39 +03:00
|
|
|
unsigned draw_count_offset =
|
2020-10-30 09:53:50 -04:00
|
|
|
indirect->indirect_draw_count_offset;
|
2019-05-10 00:44:39 +03:00
|
|
|
|
|
|
|
|
if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
|
2019-04-01 15:23:51 -07:00
|
|
|
/* comparison = draw id < draw count */
|
2021-03-03 12:29:39 -06:00
|
|
|
struct mi_value comparison =
|
2021-04-11 13:35:38 -04:00
|
|
|
mi_ult(&b, mi_imm(drawid_offset),
|
2021-03-03 12:29:39 -06:00
|
|
|
mi_mem32(ro_bo(draw_count_bo, draw_count_offset)));
|
2019-04-01 15:23:51 -07:00
|
|
|
|
|
|
|
|
/* predicate = comparison & conditional rendering predicate */
|
2021-03-03 12:29:39 -06:00
|
|
|
mi_store(&b, mi_reg32(MI_PREDICATE_RESULT),
|
|
|
|
|
mi_iand(&b, comparison, mi_reg32(CS_GPR(15))));
|
2019-05-10 00:44:39 +03:00
|
|
|
} else {
|
|
|
|
|
uint32_t mi_predicate;
|
2018-07-14 22:15:39 -07:00
|
|
|
|
2019-05-10 00:44:39 +03:00
|
|
|
/* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
|
2021-11-04 23:07:26 -07:00
|
|
|
mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(drawid_offset));
|
2019-05-10 00:44:39 +03:00
|
|
|
/* Upload the current draw count from the draw parameters buffer
|
2021-11-04 23:07:26 -07:00
|
|
|
* to MI_PREDICATE_SRC0. Zero the top 32-bits of
|
|
|
|
|
* MI_PREDICATE_SRC0.
|
2019-05-10 00:44:39 +03:00
|
|
|
*/
|
2021-11-04 23:07:26 -07:00
|
|
|
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
|
|
|
|
|
mi_mem32(ro_bo(draw_count_bo, draw_count_offset)));
|
2019-05-10 00:44:39 +03:00
|
|
|
|
2021-04-11 13:35:38 -04:00
|
|
|
if (drawid_offset == 0) {
|
2019-05-10 00:44:39 +03:00
|
|
|
mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
|
|
|
|
|
MI_PREDICATE_COMBINEOP_SET |
|
|
|
|
|
MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
|
|
|
|
|
} else {
|
|
|
|
|
/* While draw_index < draw_count the predicate's result will be
|
|
|
|
|
* (draw_index == draw_count) ^ TRUE = TRUE
|
|
|
|
|
* When draw_index == draw_count the result is
|
|
|
|
|
* (TRUE) ^ TRUE = FALSE
|
|
|
|
|
* After this all results will be:
|
|
|
|
|
* (FALSE) ^ FALSE = FALSE
|
|
|
|
|
*/
|
|
|
|
|
mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
|
|
|
|
|
MI_PREDICATE_COMBINEOP_XOR |
|
|
|
|
|
MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
|
|
|
|
|
}
|
|
|
|
|
iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-10-30 09:53:50 -04:00
|
|
|
struct iris_bo *bo = iris_resource_bo(indirect->buffer);
|
2018-07-14 22:15:39 -07:00
|
|
|
assert(bo);
|
|
|
|
|
|
2021-11-04 23:07:26 -07:00
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_VERTEX_COUNT),
|
|
|
|
|
mi_mem32(ro_bo(bo, indirect->offset + 0)));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_INSTANCE_COUNT),
|
|
|
|
|
mi_mem32(ro_bo(bo, indirect->offset + 4)));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_START_VERTEX),
|
|
|
|
|
mi_mem32(ro_bo(bo, indirect->offset + 8)));
|
2018-07-14 22:15:39 -07:00
|
|
|
if (draw->index_size) {
|
2021-11-04 23:07:26 -07:00
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_BASE_VERTEX),
|
|
|
|
|
mi_mem32(ro_bo(bo, indirect->offset + 12)));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_START_INSTANCE),
|
|
|
|
|
mi_mem32(ro_bo(bo, indirect->offset + 16)));
|
2018-07-14 22:15:39 -07:00
|
|
|
} else {
|
2021-11-04 23:07:26 -07:00
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_START_INSTANCE),
|
|
|
|
|
mi_mem32(ro_bo(bo, indirect->offset + 12)));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_BASE_VERTEX), mi_imm(0));
|
2018-07-14 22:15:39 -07:00
|
|
|
}
|
2020-10-30 09:53:50 -04:00
|
|
|
} else if (indirect && indirect->count_from_stream_output) {
|
2018-12-04 22:19:33 -08:00
|
|
|
struct iris_stream_output_target *so =
|
2020-10-30 09:53:50 -04:00
|
|
|
(void *) indirect->count_from_stream_output;
|
2018-12-04 22:19:33 -08:00
|
|
|
|
2019-01-24 09:26:38 -08:00
|
|
|
/* XXX: Replace with actual cache tracking */
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_pipe_control_flush(batch,
|
|
|
|
|
"draw count from stream output stall",
|
|
|
|
|
PIPE_CONTROL_CS_STALL);
|
2018-12-04 22:19:33 -08:00
|
|
|
|
2019-04-01 15:23:51 -07:00
|
|
|
struct iris_address addr =
|
|
|
|
|
ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
|
2021-03-03 12:29:39 -06:00
|
|
|
struct mi_value offset =
|
|
|
|
|
mi_iadd_imm(&b, mi_mem32(addr), -so->base.buffer_offset);
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_VERTEX_COUNT),
|
|
|
|
|
mi_udiv32_imm(&b, offset, so->stride));
|
2021-11-04 23:07:26 -07:00
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_START_VERTEX), mi_imm(0));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_BASE_VERTEX), mi_imm(0));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_START_INSTANCE), mi_imm(0));
|
|
|
|
|
mi_store(&b, mi_reg32(_3DPRIM_INSTANCE_COUNT),
|
|
|
|
|
mi_imm(draw->instance_count));
|
2018-07-14 22:15:39 -07:00
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2020-10-27 15:56:06 -07:00
|
|
|
iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_DRAW, draw, indirect, sc);
|
|
|
|
|
|
2018-01-25 01:36:49 -08:00
|
|
|
iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
|
|
|
|
|
prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
|
2019-05-10 00:44:39 +03:00
|
|
|
prim.PredicateEnable = use_predicate;
|
2018-11-27 15:30:16 -08:00
|
|
|
|
2020-10-30 09:53:50 -04:00
|
|
|
if (indirect) {
|
2018-12-04 22:19:33 -08:00
|
|
|
prim.IndirectParameterEnable = true;
|
|
|
|
|
} else {
|
|
|
|
|
prim.StartInstanceLocation = draw->start_instance;
|
|
|
|
|
prim.InstanceCount = draw->instance_count;
|
2020-11-01 09:04:40 -05:00
|
|
|
prim.VertexCountPerInstance = sc->count;
|
2018-12-04 22:19:33 -08:00
|
|
|
|
2020-11-01 09:04:40 -05:00
|
|
|
prim.StartVertexLocation = sc->start;
|
2018-01-21 23:55:04 -08:00
|
|
|
|
2018-12-04 22:19:33 -08:00
|
|
|
if (draw->index_size) {
|
2021-04-11 10:26:29 -04:00
|
|
|
prim.BaseVertexLocation += sc->index_bias;
|
2018-12-04 22:19:33 -08:00
|
|
|
}
|
2018-01-25 01:36:49 -08:00
|
|
|
}
|
2018-01-21 23:55:04 -08:00
|
|
|
}
|
2020-04-23 17:58:48 -07:00
|
|
|
|
|
|
|
|
iris_batch_sync_region_end(batch);
|
2021-11-23 00:43:36 +02:00
|
|
|
|
|
|
|
|
trace_intel_end_draw(&batch->trace, batch, 0);
|
2018-01-22 22:31:27 -08:00
|
|
|
}
|
|
|
|
|
|
2019-02-13 11:10:39 -08:00
|
|
|
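/**
 * Load the indirect dispatch dimensions from the grid size buffer into the
 * GPGPU_DISPATCHDIM{X,Y,Z} registers, so the walker command can source its
 * thread group counts from them.
 */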
static void
|
|
|
|
|
iris_load_indirect_location(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_grid_info *grid)
|
|
|
|
|
{
|
|
|
|
|
#define GPGPU_DISPATCHDIMX 0x2500
|
|
|
|
|
#define GPGPU_DISPATCHDIMY 0x2504
|
|
|
|
|
#define GPGPU_DISPATCHDIMZ 0x2508
|
|
|
|
|
|
|
|
|
|
assert(grid->indirect);
|
|
|
|
|
|
|
|
|
|
struct iris_state_ref *grid_size = &ice->state.grid_size;
|
|
|
|
|
struct iris_bo *bo = iris_resource_bo(grid_size->res);
|
2021-11-02 03:36:04 -07:00
|
|
|
struct mi_builder b;
|
|
|
|
|
mi_builder_init(&b, &batch->screen->devinfo, batch);
|
|
|
|
|
struct mi_value size_x = mi_mem32(ro_bo(bo, grid_size->offset + 0));
|
|
|
|
|
struct mi_value size_y = mi_mem32(ro_bo(bo, grid_size->offset + 4));
|
|
|
|
|
struct mi_value size_z = mi_mem32(ro_bo(bo, grid_size->offset + 8));
|
|
|
|
|
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMX), size_x);
|
|
|
|
|
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMY), size_y);
|
|
|
|
|
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMZ), size_z);
|
2019-02-13 11:10:39 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:09:00 -07:00
|
|
|
#if GFX_VERx10 >= 125
|
2019-02-13 11:11:43 -08:00
|
|
|
|
|
|
|
|
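/**
 * Compute dispatch path for GFX_VERx10 >= 125, which replaces the legacy
 * MEDIA_VFE_STATE/MEDIA_INTERFACE_DESCRIPTOR_LOAD/GPGPU_WALKER sequence
 * with CFE_STATE and a single COMPUTE_WALKER command carrying an inline
 * interface descriptor.
 */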
static void
|
|
|
|
|
iris_upload_compute_walker(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_grid_info *grid)
|
|
|
|
|
{
|
|
|
|
|
const uint64_t stage_dirty = ice->state.stage_dirty;
|
|
|
|
|
struct iris_screen *screen = batch->screen;
|
2021-04-05 13:19:39 -07:00
|
|
|
const struct intel_device_info *devinfo = &screen->devinfo;
|
2019-02-13 11:11:43 -08:00
|
|
|
struct iris_binder *binder = &ice->state.binder;
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
|
|
|
|
|
struct iris_compiled_shader *shader =
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_COMPUTE];
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
|
2021-04-28 10:56:28 -07:00
|
|
|
const struct brw_cs_dispatch_info dispatch =
|
|
|
|
|
brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);
|
2019-02-13 11:11:43 -08:00
|
|
|
|
2021-11-23 00:43:36 +02:00
|
|
|
trace_intel_begin_compute(&batch->trace, batch);
|
|
|
|
|
|
2019-02-13 11:11:43 -08:00
|
|
|
if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
|
|
|
|
|
iris_emit_cmd(batch, GENX(CFE_STATE), cfe) {
|
|
|
|
|
cfe.MaximumNumberofThreads =
|
2021-09-08 16:20:24 -07:00
|
|
|
devinfo->max_cs_threads * devinfo->subslice_total - 1;
|
2020-10-20 14:27:58 -05:00
|
|
|
if (prog_data->total_scratch > 0) {
|
|
|
|
|
cfe.ScratchSpaceBuffer =
|
|
|
|
|
iris_get_scratch_surf(ice, prog_data->total_scratch)->offset >> 4;
|
|
|
|
|
}
|
2019-02-13 11:11:43 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (grid->indirect)
|
|
|
|
|
iris_load_indirect_location(ice, batch, grid);
|
|
|
|
|
|
|
|
|
|
iris_emit_cmd(batch, GENX(COMPUTE_WALKER), cw) {
|
|
|
|
|
cw.IndirectParameterEnable = grid->indirect;
|
2021-04-28 10:56:28 -07:00
|
|
|
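/* The SIMDSize field encodes SIMD8/16/32 as 0/1/2, hence the divide by 16. */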
cw.SIMDSize = dispatch.simd_size / 16;
|
2019-02-13 11:11:43 -08:00
|
|
|
cw.LocalXMaximum = grid->block[0] - 1;
|
|
|
|
|
cw.LocalYMaximum = grid->block[1] - 1;
|
|
|
|
|
cw.LocalZMaximum = grid->block[2] - 1;
|
|
|
|
|
cw.ThreadGroupIDXDimension = grid->grid[0];
|
|
|
|
|
cw.ThreadGroupIDYDimension = grid->grid[1];
|
|
|
|
|
cw.ThreadGroupIDZDimension = grid->grid[2];
|
2021-04-28 10:56:28 -07:00
|
|
|
cw.ExecutionMask = dispatch.right_mask;
|
2021-10-31 23:22:42 -07:00
|
|
|
cw.PostSync.MOCS = iris_mocs(NULL, &screen->isl_dev, 0);
|
2019-02-13 11:11:43 -08:00
|
|
|
|
2020-06-17 14:04:02 -05:00
|
|
|
cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
|
2019-02-13 11:11:43 -08:00
|
|
|
.KernelStartPointer = KSP(shader),
|
2021-04-28 10:56:28 -07:00
|
|
|
.NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
|
2019-02-13 11:11:43 -08:00
|
|
|
.SharedLocalMemorySize =
|
2021-03-16 10:14:30 -07:00
|
|
|
encode_slm_size(GFX_VER, prog_data->total_shared),
|
2021-02-04 19:11:07 -06:00
|
|
|
.NumberOfBarriers = cs_prog_data->uses_barrier,
|
2019-02-13 11:11:43 -08:00
|
|
|
.SamplerStatePointer = shs->sampler_table.offset,
|
|
|
|
|
.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE],
|
|
|
|
|
};
|
|
|
|
|
|
2021-04-28 10:56:28 -07:00
|
|
|
assert(brw_cs_push_const_total_size(cs_prog_data, dispatch.threads) == 0);
|
2019-02-13 11:11:43 -08:00
|
|
|
}
|
|
|
|
|
|
2021-11-23 00:43:36 +02:00
|
|
|
trace_intel_end_compute(&batch->trace, batch, grid->grid[0], grid->grid[1], grid->grid[2]);
|
2019-02-13 11:11:43 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:09:00 -07:00
|
|
|
#else /* #if GFX_VERx10 >= 125 */
|
2019-02-13 11:11:43 -08:00
|
|
|
|
2018-07-26 21:59:20 -07:00
|
|
|
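/**
 * Compute dispatch path for platforms prior to GFX_VERx10 125, using the
 * legacy MEDIA_VFE_STATE, MEDIA_CURBE_LOAD, MEDIA_INTERFACE_DESCRIPTOR_LOAD,
 * and GPGPU_WALKER commands.
 */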
static void
|
2019-02-12 18:11:16 -08:00
|
|
|
iris_upload_gpgpu_walker(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_grid_info *grid)
|
2018-07-26 21:59:20 -07:00
|
|
|
{
|
2020-05-29 16:57:01 -07:00
|
|
|
const uint64_t stage_dirty = ice->state.stage_dirty;
|
2018-07-26 21:59:20 -07:00
|
|
|
struct iris_screen *screen = batch->screen;
|
2021-04-05 13:19:39 -07:00
|
|
|
const struct intel_device_info *devinfo = &screen->devinfo;
|
2018-07-26 21:59:20 -07:00
|
|
|
struct iris_binder *binder = &ice->state.binder;
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
|
2020-10-06 15:57:21 -05:00
|
|
|
struct iris_uncompiled_shader *ish =
|
|
|
|
|
ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
|
2018-07-26 21:59:20 -07:00
|
|
|
struct iris_compiled_shader *shader =
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_COMPUTE];
|
|
|
|
|
struct brw_stage_prog_data *prog_data = shader->prog_data;
|
|
|
|
|
struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
|
2021-04-28 10:56:28 -07:00
|
|
|
const struct brw_cs_dispatch_info dispatch =
|
|
|
|
|
brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);
|
2018-04-27 16:39:30 -07:00
|
|
|
|
2021-11-23 00:43:36 +02:00
|
|
|
trace_intel_begin_compute(&batch->trace, batch);
|
|
|
|
|
|
2021-05-04 20:24:54 -05:00
|
|
|
if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
|
|
|
|
|
cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
|
2021-03-29 15:46:12 -07:00
|
|
|
/* The MEDIA_VFE_STATE documentation for Gfx8+ says:
|
2018-10-19 01:29:05 -07:00
|
|
|
*
|
|
|
|
|
* "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
|
|
|
|
|
* the only bits that are changed are scoreboard related: Scoreboard
|
|
|
|
|
* Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
|
|
|
|
|
* these scoreboard related states, a MEDIA_STATE_FLUSH is
|
|
|
|
|
* sufficient."
|
|
|
|
|
*/
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_pipe_control_flush(batch,
|
|
|
|
|
"workaround: stall before MEDIA_VFE_STATE",
|
|
|
|
|
PIPE_CONTROL_CS_STALL);
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
|
|
|
|
|
if (prog_data->total_scratch) {
|
2021-02-12 11:39:45 -08:00
|
|
|
uint32_t scratch_addr =
|
|
|
|
|
pin_scratch_space(ice, batch, prog_data, MESA_SHADER_COMPUTE);
|
|
|
|
|
|
2018-11-07 22:05:14 -08:00
|
|
|
vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
|
2021-02-12 11:39:45 -08:00
|
|
|
vfe.ScratchSpaceBasePointer =
|
|
|
|
|
rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
|
2018-10-19 01:29:05 -07:00
|
|
|
}
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
vfe.MaximumNumberofThreads =
|
2021-09-08 16:20:24 -07:00
|
|
|
devinfo->max_cs_threads * devinfo->subslice_total - 1;
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER < 11
|
2018-10-19 01:29:05 -07:00
|
|
|
vfe.ResetGatewayTimer =
|
|
|
|
|
Resettingrelativetimerandlatchingtheglobaltimestamp;
|
2018-07-26 21:59:20 -07:00
|
|
|
#endif
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER == 8
|
2018-11-07 14:23:27 +10:00
|
|
|
vfe.BypassGatewayControl = true;
|
|
|
|
|
#endif
|
2018-10-19 01:29:05 -07:00
|
|
|
vfe.NumberofURBEntries = 2;
|
|
|
|
|
vfe.URBEntryAllocationSize = 2;
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
vfe.CURBEAllocationSize =
|
2021-04-28 10:56:28 -07:00
|
|
|
ALIGN(cs_prog_data->push.per_thread.regs * dispatch.threads +
|
2018-10-19 01:29:05 -07:00
|
|
|
cs_prog_data->push.cross_thread.regs, 2);
|
|
|
|
|
}
|
2018-07-26 21:59:20 -07:00
|
|
|
}
|
|
|
|
|
|
2019-01-24 09:26:38 -08:00
|
|
|
/* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
|
2020-09-02 13:48:51 -05:00
|
|
|
if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
|
|
|
|
|
cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
|
2019-06-26 23:33:40 -07:00
|
|
|
uint32_t curbe_data_offset = 0;
|
|
|
|
|
assert(cs_prog_data->push.cross_thread.dwords == 0 &&
|
|
|
|
|
cs_prog_data->push.per_thread.dwords == 1 &&
|
|
|
|
|
cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
|
2020-03-20 21:02:06 -07:00
|
|
|
const unsigned push_const_size =
|
2021-04-28 10:56:28 -07:00
|
|
|
brw_cs_push_const_total_size(cs_prog_data, dispatch.threads);
|
2019-06-26 23:33:40 -07:00
|
|
|
uint32_t *curbe_data_map =
|
2019-06-26 23:38:59 -07:00
|
|
|
stream_state(batch, ice->state.dynamic_uploader,
|
|
|
|
|
&ice->state.last_res.cs_thread_ids,
|
2020-03-20 21:02:06 -07:00
|
|
|
ALIGN(push_const_size, 64), 64,
|
2019-06-26 23:33:40 -07:00
|
|
|
&curbe_data_offset);
|
|
|
|
|
assert(curbe_data_map);
|
2020-03-20 21:02:06 -07:00
|
|
|
memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
|
2021-04-28 10:56:28 -07:00
|
|
|
iris_fill_cs_push_const_buffer(cs_prog_data, dispatch.threads,
|
|
|
|
|
curbe_data_map);
|
2019-06-26 23:33:40 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
|
2020-03-20 21:02:06 -07:00
|
|
|
curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
|
2018-10-19 01:29:05 -07:00
|
|
|
curbe.CURBEDataStartAddress = curbe_data_offset;
|
|
|
|
|
}
|
2018-07-26 21:59:20 -07:00
|
|
|
}
|
|
|
|
|
|
2018-10-25 17:53:23 -05:00
|
|
|
for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
|
|
|
|
|
struct pipe_resource *res = ice->state.global_bindings[i];
|
|
|
|
|
if (!res)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(res),
|
|
|
|
|
true, IRIS_DOMAIN_NONE);
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-29 16:57:01 -07:00
|
|
|
if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
|
|
|
|
|
IRIS_STAGE_DIRTY_BINDINGS_CS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CONSTANTS_CS |
|
|
|
|
|
IRIS_STAGE_DIRTY_CS)) {
|
2018-10-19 01:29:05 -07:00
|
|
|
uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
|
2020-10-14 14:56:19 -07:00
|
|
|
idd.SharedLocalMemorySize =
|
2021-03-16 10:14:30 -07:00
|
|
|
encode_slm_size(GFX_VER, ish->kernel_shared_size);
|
2020-05-21 00:09:19 -07:00
|
|
|
idd.KernelStartPointer =
|
2021-04-28 10:56:28 -07:00
|
|
|
KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data,
|
|
|
|
|
dispatch.simd_size);
|
2018-10-19 01:29:05 -07:00
|
|
|
idd.SamplerStatePointer = shs->sampler_table.offset;
|
|
|
|
|
idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
|
2021-04-28 10:56:28 -07:00
|
|
|
idd.NumberofThreadsinGPGPUThreadGroup = dispatch.threads;
|
2018-10-19 01:29:05 -07:00
|
|
|
}
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
|
|
|
|
|
desc[i] |= ((uint32_t *) shader->derived_data)[i];
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2018-10-19 01:29:05 -07:00
|
|
|
iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
|
|
|
|
|
load.InterfaceDescriptorTotalLength =
|
|
|
|
|
GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
|
|
|
|
|
load.InterfaceDescriptorDataStartAddress =
|
|
|
|
|
emit_state(batch, ice->state.dynamic_uploader,
|
2019-06-26 23:56:45 -07:00
|
|
|
&ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
|
2018-10-19 01:29:05 -07:00
|
|
|
}
|
|
|
|
|
}
|
2018-07-26 21:59:20 -07:00
|
|
|
|
2019-02-13 11:10:39 -08:00
|
|
|
if (grid->indirect)
|
|
|
|
|
iris_load_indirect_location(ice, batch, grid);
|
2018-09-21 02:28:44 -07:00
|
|
|
|
2020-10-27 15:56:06 -07:00
|
|
|
iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_COMPUTE, NULL, NULL, NULL);
|
|
|
|
|
|
2018-07-26 21:59:20 -07:00
|
|
|
iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
|
2018-09-21 02:28:44 -07:00
|
|
|
ggw.IndirectParameterEnable = grid->indirect != NULL;
|
2021-04-28 10:56:28 -07:00
|
|
|
ggw.SIMDSize = dispatch.simd_size / 16;
|
2018-07-26 21:59:20 -07:00
|
|
|
ggw.ThreadDepthCounterMaximum = 0;
|
|
|
|
|
ggw.ThreadHeightCounterMaximum = 0;
|
2021-04-28 10:56:28 -07:00
|
|
|
ggw.ThreadWidthCounterMaximum = dispatch.threads - 1;
|
2018-09-18 13:04:59 -07:00
|
|
|
ggw.ThreadGroupIDXDimension = grid->grid[0];
|
|
|
|
|
ggw.ThreadGroupIDYDimension = grid->grid[1];
|
|
|
|
|
ggw.ThreadGroupIDZDimension = grid->grid[2];
|
2021-04-28 10:56:28 -07:00
|
|
|
ggw.RightExecutionMask = dispatch.right_mask;
|
2018-07-26 21:59:20 -07:00
|
|
|
ggw.BottomExecutionMask = 0xffffffff;
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-23 15:09:55 -07:00
|
|
|
iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
|
2021-11-23 00:43:36 +02:00
|
|
|
|
|
|
|
|
trace_intel_end_compute(&batch->trace, batch, grid->grid[0], grid->grid[1], grid->grid[2]);
|
2019-02-12 18:11:16 -08:00
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:09:00 -07:00
|
|
|
#endif /* #if GFX_VERx10 >= 125 */
|
2019-02-13 11:11:43 -08:00
|
|
|
|
2019-02-12 18:11:16 -08:00
|
|
|
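/**
 * Common compute dispatch entry point: uploads system values, binding
 * tables, and sampler states as needed, pins the required BOs, and then
 * emits the generation-appropriate walker command.
 */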
static void
|
|
|
|
|
iris_upload_compute_state(struct iris_context *ice,
|
|
|
|
|
struct iris_batch *batch,
|
|
|
|
|
const struct pipe_grid_info *grid)
|
|
|
|
|
{
|
|
|
|
|
const uint64_t stage_dirty = ice->state.stage_dirty;
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
|
|
|
|
|
struct iris_compiled_shader *shader =
|
|
|
|
|
ice->shaders.prog[MESA_SHADER_COMPUTE];
|
|
|
|
|
|
|
|
|
|
iris_batch_sync_region_start(batch);
|
|
|
|
|
|
|
|
|
|
/* Always pin the binder. If we're emitting new binding table pointers,
|
|
|
|
|
* we need it. If not, we're probably inheriting old tables via the
|
|
|
|
|
* context, and need it anyway. Since true zero-bindings cases are
|
|
|
|
|
* practically non-existent, just pin it and avoid last_res tracking.
|
|
|
|
|
*/
|
|
|
|
|
iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);
|
|
|
|
|
|
2020-08-20 23:47:51 -05:00
|
|
|
if (((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
|
|
|
|
|
shs->sysvals_need_upload) ||
|
|
|
|
|
shader->kernel_input_size > 0)
|
2020-08-11 10:30:42 -05:00
|
|
|
upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);
|
2019-02-12 18:11:16 -08:00
|
|
|
|
|
|
|
|
if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
|
|
|
|
|
iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
|
|
|
|
|
|
|
|
|
|
if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
|
|
|
|
|
iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
|
|
|
|
|
|
|
|
|
|
iris_use_optional_res(batch, shs->sampler_table.res, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
|
|
|
|
iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
|
|
|
|
|
|
|
|
|
if (ice->state.need_border_colors)
|
|
|
|
|
iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
|
|
|
|
|
IRIS_DOMAIN_NONE);
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
#if GFX_VER >= 12
|
2019-02-12 18:11:16 -08:00
|
|
|
genX(invalidate_aux_map_state)(batch);
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-03-16 10:09:00 -07:00
|
|
|
#if GFX_VERx10 >= 125
|
2019-02-13 11:11:43 -08:00
|
|
|
iris_upload_compute_walker(ice, batch, grid);
|
|
|
|
|
#else
|
2019-02-12 18:11:16 -08:00
|
|
|
iris_upload_gpgpu_walker(ice, batch, grid);
|
2019-02-13 11:11:43 -08:00
|
|
|
#endif
|
2018-09-23 15:09:55 -07:00
|
|
|
|
2020-02-06 21:06:17 -08:00
|
|
|
if (!batch->contains_draw_with_next_seqno) {
|
2018-09-19 12:25:18 -07:00
|
|
|
iris_restore_compute_saved_bos(ice, batch, grid);
|
2020-02-06 21:06:17 -08:00
|
|
|
batch->contains_draw_with_next_seqno = batch->contains_draw = true;
|
2018-07-26 21:59:20 -07:00
|
|
|
}
|
2020-04-23 17:58:48 -07:00
|
|
|
|
|
|
|
|
iris_batch_sync_region_end(batch);
|
2018-07-26 21:59:20 -07:00
|
|
|
}
|
|
|
|
|
|
2018-06-16 09:56:59 -07:00
|
|
|
/**
|
|
|
|
|
* State module teardown.
|
|
|
|
|
*/
|
2018-01-25 01:36:49 -08:00
|
|
|
static void
|
2018-01-09 14:34:15 -08:00
|
|
|
iris_destroy_state(struct iris_context *ice)
|
|
|
|
|
{
|
2018-12-04 16:38:14 -08:00
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
|
|
|
|
|
2021-10-06 14:45:02 -07:00
|
|
|
pipe_resource_reference(&ice->state.pixel_hashing_tables, NULL);
|
|
|
|
|
|
2019-09-18 20:12:33 -07:00
|
|
|
pipe_resource_reference(&ice->draw.draw_params.res, NULL);
|
|
|
|
|
pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
|
2019-06-27 11:49:41 -07:00
|
|
|
|
2019-10-08 10:11:52 -07:00
|
|
|
/* Loop over all VBOs, including ones for draw parameters */
|
|
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
|
2018-12-04 16:38:14 -08:00
|
|
|
pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
|
|
|
|
|
}
|
2019-10-08 10:11:52 -07:00
|
|
|
|
2019-01-24 09:01:53 -08:00
|
|
|
free(ice->state.genx);
|
2018-06-16 09:56:59 -07:00
|
|
|
|
2019-06-27 14:09:05 -07:00
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
|
pipe_so_target_reference(&ice->state.so_target[i], NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-09 23:13:16 -08:00
|
|
|
for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
|
|
|
|
|
pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
|
|
|
|
|
}
|
|
|
|
|
pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
|
2018-06-16 09:56:59 -07:00
|
|
|
|
2018-06-16 10:39:26 -07:00
|
|
|
for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
|
2018-08-18 23:43:14 -07:00
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[stage];
|
|
|
|
|
pipe_resource_reference(&shs->sampler_table.res, NULL);
|
2019-01-24 09:01:53 -08:00
|
|
|
for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
|
2019-04-16 23:44:15 -07:00
|
|
|
pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
|
|
|
|
|
pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
|
2019-01-24 09:01:53 -08:00
|
|
|
}
|
|
|
|
|
for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
|
2019-04-22 11:27:37 -07:00
|
|
|
pipe_resource_reference(&shs->image[i].base.resource, NULL);
|
2019-11-14 16:06:10 -08:00
|
|
|
pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
|
2019-11-14 17:17:43 -08:00
|
|
|
free(shs->image[i].surface_state.cpu);
|
2019-01-24 09:01:53 -08:00
|
|
|
}
|
|
|
|
|
for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
|
2019-04-16 23:44:15 -07:00
|
|
|
pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
|
|
|
|
|
pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
|
2019-01-24 09:01:53 -08:00
|
|
|
}
|
|
|
|
|
for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
|
|
|
|
|
pipe_sampler_view_reference((struct pipe_sampler_view **)
|
|
|
|
|
&shs->textures[i], NULL);
|
|
|
|
|
}
|
2018-06-16 10:39:26 -07:00
|
|
|
}
|
2018-06-16 09:56:59 -07:00
|
|
|
|
2019-01-24 09:01:53 -08:00
|
|
|
pipe_resource_reference(&ice->state.grid_size.res, NULL);
|
|
|
|
|
pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
|
|
|
|
|
|
|
|
|
|
pipe_resource_reference(&ice->state.null_fb.res, NULL);
|
2018-11-28 15:22:07 -08:00
|
|
|
pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
|
|
|
|
|
|
2018-06-16 09:56:59 -07:00
|
|
|
pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
|
|
|
|
|
pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
|
|
|
|
|
pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
|
|
|
|
|
pipe_resource_reference(&ice->state.last_res.scissor, NULL);
|
|
|
|
|
pipe_resource_reference(&ice->state.last_res.blend, NULL);
|
2018-09-20 17:27:47 -07:00
|
|
|
pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
|
2019-06-26 23:38:59 -07:00
|
|
|
pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
|
2019-06-26 23:56:45 -07:00
|
|
|
pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
|
2018-01-09 14:34:15 -08:00
|
|
|
}
|
|
|
|
|
|
2018-07-30 23:49:34 -07:00
|
|
|
/* ------------------------------------------------------------------- */
|
|
|
|
|
|
2019-03-12 14:51:22 -07:00
|
|
|
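/**
 * Called when a buffer resource gets new backing storage.  Walks every
 * binding point that recorded this buffer in bind_history (vertex buffers,
 * stream output, constant buffers, SSBOs, sampler views, images), patches
 * any stored GPU addresses, and flags the corresponding dirty bits so the
 * state is re-emitted.
 */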
static void
|
|
|
|
|
iris_rebind_buffer(struct iris_context *ice,
|
2019-10-31 09:41:49 -07:00
|
|
|
struct iris_resource *res)
|
2019-03-12 14:51:22 -07:00
|
|
|
{
|
|
|
|
|
struct pipe_context *ctx = &ice->ctx;
|
|
|
|
|
struct iris_genx_state *genx = ice->state.genx;
|
|
|
|
|
|
2021-02-08 16:39:42 -08:00
|
|
|
assert(res->base.b.target == PIPE_BUFFER);
|
2019-03-12 14:51:22 -07:00
|
|
|
|
|
|
|
|
/* Buffers can't be framebuffer attachments, nor display related,
|
|
|
|
|
* and we don't have upstream Clover support.
|
|
|
|
|
*/
|
|
|
|
|
assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
|
|
|
|
|
PIPE_BIND_RENDER_TARGET |
|
|
|
|
|
PIPE_BIND_BLENDABLE |
|
|
|
|
|
PIPE_BIND_DISPLAY_TARGET |
|
|
|
|
|
PIPE_BIND_CURSOR |
|
|
|
|
|
PIPE_BIND_COMPUTE_RESOURCE |
|
|
|
|
|
PIPE_BIND_GLOBAL)));
|
|
|
|
|
|
|
|
|
|
if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
|
|
|
|
|
uint64_t bound_vbs = ice->state.bound_vertex_buffers;
|
|
|
|
|
while (bound_vbs) {
|
|
|
|
|
const int i = u_bit_scan64(&bound_vbs);
|
|
|
|
|
struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
|
|
|
|
|
|
|
|
|
|
/* Update the CPU struct */
|
|
|
|
|
STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
|
|
|
|
|
STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
|
|
|
|
|
uint64_t *addr = (uint64_t *) &state->state[1];
|
2019-10-31 09:41:49 -07:00
|
|
|
struct iris_bo *bo = iris_resource_bo(state->resource);
|
2019-03-12 14:51:22 -07:00
|
|
|
|
2021-07-19 21:23:18 -07:00
|
|
|
if (*addr != bo->address + state->offset) {
|
|
|
|
|
*addr = bo->address + state->offset;
|
2020-05-29 16:11:58 -07:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
|
|
|
|
|
IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
|
2019-03-12 14:51:22 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-16 00:57:54 -08:00
|
|
|
/* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
|
|
|
|
|
* the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
|
|
|
|
|
*
|
|
|
|
|
* There is also no need to handle these:
|
2019-03-12 14:51:22 -07:00
|
|
|
* - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
|
|
|
|
|
* - PIPE_BIND_QUERY_BUFFER (no persistent state references)
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
|
2021-02-02 18:42:41 -08:00
|
|
|
uint32_t *so_buffers = genx->so_buffers;
|
|
|
|
|
for (unsigned i = 0; i < 4; i++,
|
|
|
|
|
so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
|
|
|
|
|
|
|
|
|
|
/* There are no other fields in bits 127:64 */
|
|
|
|
|
uint64_t *addr = (uint64_t *) &so_buffers[2];
|
|
|
|
|
STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_start) == 66);
|
|
|
|
|
STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_bits) == 46);
|
|
|
|
|
|
|
|
|
|
struct pipe_stream_output_target *tgt = ice->state.so_target[i];
|
|
|
|
|
if (tgt) {
|
|
|
|
|
struct iris_bo *bo = iris_resource_bo(tgt->buffer);
|
2021-07-19 21:23:18 -07:00
|
|
|
if (*addr != bo->address + tgt->buffer_offset) {
|
|
|
|
|
*addr = bo->address + tgt->buffer_offset;
|
2021-02-02 18:42:41 -08:00
|
|
|
ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-03-12 14:51:22 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
|
|
|
|
|
struct iris_shader_state *shs = &ice->state.shaders[s];
|
|
|
|
|
enum pipe_shader_type p_stage = stage_to_pipe(s);
|
|
|
|
|
|
2019-09-10 11:14:57 -07:00
|
|
|
if (!(res->bind_stages & (1 << s)))
|
|
|
|
|
continue;
|
|
|
|
|
|
2019-03-12 14:51:22 -07:00
|
|
|
if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
|
|
|
|
|
/* Skip constant buffer 0, it's for regular uniforms, not UBOs */
|
|
|
|
|
uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
|
|
|
|
|
while (bound_cbufs) {
|
|
|
|
|
const int i = u_bit_scan(&bound_cbufs);
|
|
|
|
|
struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
|
|
|
|
|
struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
|
|
|
|
|
|
|
|
|
|
if (res->bo == iris_resource_bo(cbuf->buffer)) {
|
2019-09-14 23:18:20 -07:00
|
|
|
pipe_resource_reference(&surf_state->res, NULL);
|
2020-05-05 13:05:52 -07:00
|
|
|
shs->dirty_cbufs |= 1u << i;
|
2020-05-29 16:36:23 -07:00
|
|
|
ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
|
|
|
|
|
IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
|
2019-03-12 14:51:22 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
|
|
|
|
|
uint32_t bound_ssbos = shs->bound_ssbos;
|
|
|
|
|
while (bound_ssbos) {
|
|
|
|
|
const int i = u_bit_scan(&bound_ssbos);
|
|
|
|
|
struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
|
|
|
|
|
|
|
|
|
|
if (res->bo == iris_resource_bo(ssbo->buffer)) {
|
|
|
|
|
struct pipe_shader_buffer buf = {
|
2021-02-08 16:39:42 -08:00
|
|
|
.buffer = &res->base.b,
|
2019-03-12 14:51:22 -07:00
|
|
|
.buffer_offset = ssbo->buffer_offset,
|
|
|
|
|
.buffer_size = ssbo->buffer_size,
|
|
|
|
|
};
|
|
|
|
|
iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
|
|
|
|
|
(shs->writable_ssbos >> i) & 1);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
|
|
|
|
|
uint32_t bound_sampler_views = shs->bound_sampler_views;
|
|
|
|
|
while (bound_sampler_views) {
|
|
|
|
|
const int i = u_bit_scan(&bound_sampler_views);
|
|
|
|
|
struct iris_sampler_view *isv = shs->textures[i];
|
2019-11-14 17:17:43 -08:00
|
|
|
struct iris_bo *bo = isv->res->bo;
|
2019-03-12 14:51:22 -07:00
|
|
|
|
2019-11-14 17:17:43 -08:00
|
|
|
if (update_surface_state_addrs(ice->state.surface_uploader,
|
|
|
|
|
&isv->surface_state, bo)) {
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
|
2019-03-12 14:51:22 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
|
|
|
|
|
uint32_t bound_image_views = shs->bound_image_views;
|
|
|
|
|
while (bound_image_views) {
|
|
|
|
|
const int i = u_bit_scan(&bound_image_views);
|
|
|
|
|
struct iris_image_view *iv = &shs->image[i];
|
2019-11-14 17:17:43 -08:00
|
|
|
struct iris_bo *bo = iris_resource_bo(iv->base.resource);
|
2019-03-12 14:51:22 -07:00
|
|
|
|
2019-11-14 17:17:43 -08:00
|
|
|
if (update_surface_state_addrs(ice->state.surface_uploader,
|
|
|
|
|
&iv->surface_state, bo)) {
|
2020-05-29 16:57:01 -07:00
|
|
|
ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
|
2019-03-12 14:51:22 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------- */
|
|
|
|
|
|
2020-02-10 01:24:29 -08:00
|
|
|
/**
|
|
|
|
|
* Introduce a batch synchronization boundary, and update its cache coherency
|
|
|
|
|
* status to reflect the execution of a PIPE_CONTROL command with the
|
|
|
|
|
* specified flags.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
|
|
|
|
|
{
|
|
|
|
|
iris_batch_sync_boundary(batch);
|
|
|
|
|
|
|
|
|
|
if ((flags & PIPE_CONTROL_CS_STALL)) {
|
|
|
|
|
if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
|
|
|
|
|
iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
|
|
|
|
|
|
|
|
|
|
if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
|
|
|
|
|
iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
|
|
|
|
|
|
2020-05-29 17:22:55 -07:00
|
|
|
if ((flags & PIPE_CONTROL_DATA_CACHE_FLUSH))
|
|
|
|
|
iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DATA_WRITE);
|
|
|
|
|
|
2020-02-10 01:24:29 -08:00
|
|
|
if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
|
|
|
|
|
iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
|
|
|
|
|
|
|
|
|
|
if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
|
2020-05-29 17:21:28 -07:00
|
|
|
PIPE_CONTROL_STALL_AT_SCOREBOARD))) {
|
|
|
|
|
iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_VF_READ);
|
2020-02-10 01:24:29 -08:00
|
|
|
iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
|
2020-05-29 17:21:28 -07:00
|
|
|
}
|
2020-02-10 01:24:29 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
|
|
|
|
|
iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
|
|
|
|
|
|
|
|
|
|
if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
|
|
|
|
|
iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
|
|
|
|
|
|
2020-05-29 17:22:55 -07:00
|
|
|
if ((flags & PIPE_CONTROL_DATA_CACHE_FLUSH))
|
|
|
|
|
iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DATA_WRITE);
|
|
|
|
|
|
2020-02-10 01:24:29 -08:00
|
|
|
if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
|
|
|
|
|
iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
|
|
|
|
|
|
2020-05-29 17:21:28 -07:00
|
|
|
if ((flags & PIPE_CONTROL_VF_CACHE_INVALIDATE))
|
|
|
|
|
iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_VF_READ);
|
|
|
|
|
|
2020-02-10 01:24:29 -08:00
|
|
|
if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
|
|
|
|
|
(flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
|
|
|
|
|
iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-19 12:52:51 -07:00
|
|
|
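/**
 * Translate PIPE_CONTROL_WRITE_* flags into the hardware "Post Sync
 * Operation" field value; returns 0 when no post-sync write is requested.
 */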
static unsigned
|
|
|
|
|
flags_to_post_sync_op(uint32_t flags)
|
|
|
|
|
{
|
|
|
|
|
if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
|
|
|
|
|
return WriteImmediateData;
|
|
|
|
|
|
|
|
|
|
if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
|
|
|
|
|
return WritePSDepthCount;
|
|
|
|
|
|
|
|
|
|
if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
|
|
|
|
|
return WriteTimestamp;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Do the given flags have a Post Sync or LRI Post Sync operation?
|
|
|
|
|
*/
|
|
|
|
|
static enum pipe_control_flags
|
|
|
|
|
get_post_sync_flags(enum pipe_control_flags flags)
|
|
|
|
|
{
|
|
|
|
|
flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
|
|
|
|
|
PIPE_CONTROL_WRITE_DEPTH_COUNT |
|
|
|
|
|
PIPE_CONTROL_WRITE_TIMESTAMP |
|
|
|
|
|
PIPE_CONTROL_LRI_POST_SYNC_OP;
|
|
|
|
|
|
|
|
|
|
/* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
|
|
|
|
|
* "LRI Post Sync Operation". So more than one bit set would be illegal.
|
|
|
|
|
*/
|
|
|
|
|
assert(util_bitcount(flags) <= 1);
|
|
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-20 09:12:54 -08:00
|
|
|
#define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
|
2018-04-19 12:52:51 -07:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Emit a series of PIPE_CONTROL commands, taking into account any
|
|
|
|
|
* workarounds necessary to actually accomplish the caller's request.
|
|
|
|
|
*
|
|
|
|
|
* Unless otherwise noted, spec quotations in this function come from:
|
|
|
|
|
*
|
|
|
|
|
* Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
|
|
|
|
|
* Restrictions for PIPE_CONTROL.
|
2018-07-30 23:49:34 -07:00
|
|
|
*
|
|
|
|
|
* You should not use this function directly. Use the helpers in
|
|
|
|
|
* iris_pipe_control.c instead, which may split the pipe control further.
|
2018-04-19 12:52:51 -07:00
|
|
|
*/
|
|
|
|
|
static void
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_raw_pipe_control(struct iris_batch *batch,
|
|
|
|
|
const char *reason,
|
|
|
|
|
uint32_t flags,
|
|
|
|
|
struct iris_bo *bo,
|
|
|
|
|
uint32_t offset,
|
|
|
|
|
uint64_t imm)
|
2018-04-19 12:52:51 -07:00
|
|
|
{
|
2021-04-05 13:19:39 -07:00
|
|
|
UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
|
2018-04-19 12:52:51 -07:00
|
|
|
enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
|
|
|
|
|
enum pipe_control_flags non_lri_post_sync_flags =
|
|
|
|
|
post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
|
|
|
|
|
|
2020-09-04 16:16:48 -07:00
|
|
|
#if GFX_VER >= 12
|
|
|
|
|
if (batch->name == IRIS_BATCH_BLITTER) {
|
|
|
|
|
batch_mark_sync_for_pipe_control(batch, flags);
|
|
|
|
|
iris_batch_sync_region_start(batch);
|
|
|
|
|
|
|
|
|
|
/* The blitter doesn't actually use PIPE_CONTROL; rather it uses the
|
|
|
|
|
* MI_FLUSH_DW command. However, all of our code is set up to flush
|
|
|
|
|
* via emitting a pipe control, so we just translate it at this point,
|
|
|
|
|
* even if it is a bit hacky.
|
|
|
|
|
*/
|
|
|
|
|
iris_emit_cmd(batch, GENX(MI_FLUSH_DW), fd) {
|
|
|
|
|
fd.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
|
|
|
|
|
fd.ImmediateData = imm;
|
|
|
|
|
#if GFX_VERx10 >= 125
|
|
|
|
|
/* TODO: This may not always be necessary */
|
|
|
|
|
fd.FlushCCS = true;
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
iris_batch_sync_region_end(batch);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2018-04-19 12:52:51 -07:00
|
|
|
/* Recursive PIPE_CONTROL workarounds --------------------------------
|
|
|
|
|
* (http://knowyourmeme.com/memes/xzibit-yo-dawg)
|
|
|
|
|
*
|
|
|
|
|
* We do these first because we want to look at the original operation,
|
|
|
|
|
* rather than any workarounds we set.
|
|
|
|
|
*/
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
|
|
|
|
|
* lists several workarounds:
|
|
|
|
|
*
|
|
|
|
|
* "Project: SKL, KBL, BXT
|
|
|
|
|
*
|
|
|
|
|
* If the VF Cache Invalidation Enable is set to a 1 in a
|
|
|
|
|
* PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
|
|
|
|
|
* sets to 0, with the VF Cache Invalidation Enable set to 0
|
|
|
|
|
* needs to be sent prior to the PIPE_CONTROL with VF Cache
|
|
|
|
|
* Invalidation Enable set to a 1."
|
|
|
|
|
*/
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_raw_pipe_control(batch,
|
|
|
|
|
"workaround: recursive VF cache invalidate",
|
|
|
|
|
0, NULL, 0, 0);
|
2018-04-19 12:52:51 -07:00
|
|
|
}
|
|
|
|
|
|
2021-03-29 17:15:41 -07:00
|
|
|
/* Wa_1409226450: Wait for EU to be idle before pipe control which
|
2020-01-16 13:16:24 -08:00
|
|
|
* invalidates the instruction cache
|
|
|
|
|
*/
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
|
2020-01-16 13:16:24 -08:00
|
|
|
iris_emit_raw_pipe_control(batch,
|
|
|
|
|
"workaround: CS stall before instruction "
|
|
|
|
|
"cache invalidate",
|
|
|
|
|
PIPE_CONTROL_CS_STALL |
|
|
|
|
|
PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
|
|
|
|
|
imm);
|
|
|
|
|
}
|
|
|
|
|
|
2021-10-07 12:38:03 -07:00
|
|
|
if (GFX_VER == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* Project: SKL / Argument: LRI Post Sync Operation [23]
|
|
|
|
|
*
|
|
|
|
|
* "PIPECONTROL command with “Command Streamer Stall Enable” must be
|
|
|
|
|
* programmed prior to programming a PIPECONTROL command with "LRI
|
|
|
|
|
* Post Sync Operation" in GPGPU mode of operation (i.e when
|
|
|
|
|
* PIPELINE_SELECT command is set to GPGPU mode of operation)."
|
|
|
|
|
*
|
|
|
|
|
* The same text exists a few rows below for Post Sync Op.
|
|
|
|
|
*/
|
2019-06-19 16:04:50 -05:00
|
|
|
iris_emit_raw_pipe_control(batch,
|
|
|
|
|
"workaround: CS stall before gpgpu post-sync",
|
|
|
|
|
PIPE_CONTROL_CS_STALL, bo, offset, imm);
|
2018-04-19 12:52:51 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* "Flush Types" workarounds ---------------------------------------------
|
|
|
|
|
* We do these now because they may add post-sync operations or CS stalls.
|
|
|
|
|
*/
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
|
|
|
|
|
*
|
|
|
|
|
* "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
|
|
|
|
|
* 'Write PS Depth Count' or 'Write Timestamp'."
|
|
|
|
|
*/
|
|
|
|
|
if (!bo) {
|
|
|
|
|
flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
|
|
|
|
|
post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
|
|
|
|
|
non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
|
2020-02-21 18:06:18 +02:00
|
|
|
bo = batch->screen->workaround_address.bo;
|
|
|
|
|
offset = batch->screen->workaround_address.offset;
|
2018-04-19 12:52:51 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (flags & PIPE_CONTROL_DEPTH_STALL) {
|
|
|
|
|
/* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
|
|
|
|
|
*
|
|
|
|
|
* "This bit must be DISABLED for operations other than writing
|
|
|
|
|
* PS_DEPTH_COUNT."
|
|
|
|
|
*
|
|
|
|
|
* This seems like nonsense. An Ivybridge workaround requires us to
|
|
|
|
|
* emit a PIPE_CONTROL with a depth stall and write immediate post-sync
|
2021-03-29 15:46:12 -07:00
|
|
|
* operation. Gfx8+ requires us to emit depth stalls and depth cache
|
2018-04-19 12:52:51 -07:00
|
|
|
* flushes together. So, it's hard to imagine this means anything other
|
|
|
|
|
* than "we originally intended this to be used for PS_DEPTH_COUNT".
|
|
|
|
|
*
|
|
|
|
|
* We ignore the supposed restriction and do nothing.
|
|
|
|
|
*/
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
|
|
|
|
|
PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
|
|
|
|
|
/* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
|
|
|
|
|
*
|
|
|
|
|
* "This bit must be DISABLED for End-of-pipe (Read) fences,
|
|
|
|
|
* PS_DEPTH_COUNT or TIMESTAMP queries."
|
|
|
|
|
*
|
|
|
|
|
* TODO: Implement end-of-pipe checking.
|
|
|
|
|
*/
|
|
|
|
|
assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
|
|
|
|
|
PIPE_CONTROL_WRITE_TIMESTAMP)));
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* From the PIPE_CONTROL instruction table, bit 1:
|
|
|
|
|
*
|
|
|
|
|
* "This bit is ignored if Depth Stall Enable is set.
|
|
|
|
|
* Further, the render cache is not flushed even if Write Cache
|
|
|
|
|
* Flush Enable bit is set."
|
|
|
|
|
*
|
|
|
|
|
* We assert that the caller doesn't do this combination, to try and
|
|
|
|
|
* prevent mistakes. It shouldn't hurt the GPU, though.
|
2018-10-08 13:44:01 -07:00
|
|
|
*
|
2021-03-29 15:46:12 -07:00
|
|
|
* We skip this check on Gfx11+ as the "Stall at Pixel Scoreboard"
|
2018-10-08 13:44:01 -07:00
|
|
|
* and "Render Target Flush" combo is explicitly required for BTI
|
|
|
|
|
* update workarounds.
|
2018-04-19 12:52:51 -07:00
|
|
|
*/
|
|
|
|
|
assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
|
|
|
|
|
PIPE_CONTROL_RENDER_TARGET_FLUSH)));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* PIPE_CONTROL page workarounds ------------------------------------- */
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* From the PIPE_CONTROL page itself:
|
|
|
|
|
*
|
|
|
|
|
* "IVB, HSW, BDW
|
|
|
|
|
* Restriction: Pipe_control with CS-stall bit set must be issued
|
|
|
|
|
* before a pipe-control command that has the State Cache
|
|
|
|
|
* Invalidate bit set."
|
|
|
|
|
*/
|
|
|
|
|
flags |= PIPE_CONTROL_CS_STALL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (flags & PIPE_CONTROL_FLUSH_LLC) {
|
|
|
|
|
/* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
|
|
|
|
|
*
|
|
|
|
|
* "Project: ALL
|
|
|
|
|
* SW must always program Post-Sync Operation to "Write Immediate
|
|
|
|
|
* Data" when Flush LLC is set."
|
|
|
|
|
*
|
|
|
|
|
* For now, we just require the caller to do it.
|
|
|
|
|
*/
|
|
|
|
|
assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* "Post-Sync Operation" workarounds -------------------------------- */
|
|
|
|
|
|
|
|
|
|
/* Project: All / Argument: Global Snapshot Count Reset [19]
|
|
|
|
|
*
|
|
|
|
|
* "This bit must not be exercised on any product.
|
|
|
|
|
* Requires stall bit ([20] of DW1) set."
|
|
|
|
|
*
|
|
|
|
|
* We don't use this, so we just assert that it isn't used. The
|
|
|
|
|
* PIPE_CONTROL instruction page indicates that they intended this
|
|
|
|
|
* as a debug feature and don't think it is useful in production,
|
|
|
|
|
* but it may actually be usable, should we ever want to.
|
|
|
|
|
*/
|
|
|
|
|
assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
|
|
|
|
|
|
|
|
|
|
if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
|
|
|
|
|
PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
|
|
|
|
|
/* Project: All / Arguments:
|
|
|
|
|
*
|
|
|
|
|
* - Generic Media State Clear [16]
|
|
|
|
|
* - Indirect State Pointers Disable [16]
|
|
|
|
|
*
|
|
|
|
|
* "Requires stall bit ([20] of DW1) set."
|
|
|
|
|
*
|
|
|
|
|
* Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
|
|
|
|
|
* State Clear) says:
|
|
|
|
|
*
|
|
|
|
|
* "PIPECONTROL command with “Command Streamer Stall Enable” must be
|
|
|
|
|
* programmed prior to programming a PIPECONTROL command with "Media
|
|
|
|
|
* State Clear" set in GPGPU mode of operation"
|
|
|
|
|
*
|
|
|
|
|
* This is a subset of the earlier rule, so there's nothing to do.
|
|
|
|
|
*/
|
|
|
|
|
flags |= PIPE_CONTROL_CS_STALL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
   if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
      /* Project: All / Argument: Store Data Index
       *
       *    "Post-Sync Operation ([15:14] of DW1) must be set to something
       *     other than '0'."
       *
       * For now, we just assert that the caller does this.  We might want to
       * automatically add a write to the workaround BO...
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_SYNC_GFDT) {
      /* Project: All / Argument: Sync GFDT
       *
       *    "Post-Sync Operation ([15:14] of DW1) must be set to something
       *     other than '0' or 0x2520[13] must be set."
       *
       * For now, we just assert that the caller does this.
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
      /* Project: IVB+ / Argument: TLB inv
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, from the PIPE_CONTROL instruction table:
       *
       *    "Project: SKL+
       *     Post Sync Operation or CS stall must be set to ensure a TLB
       *     invalidation occurs.  Otherwise no cycle will occur to the TLB
       *     cache to invalidate."
       *
       * This is not strictly a subset of the earlier rule, but the CS stall
       * we add below satisfies it as well, so there's nothing extra to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (GFX_VER == 9 && devinfo->gt == 4) {
      /* TODO: The big Skylake GT4 post sync op workaround */
   }

/* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
|
|
|
|
|
|
|
|
|
|
if (IS_COMPUTE_PIPELINE(batch)) {
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* Project: SKL+ / Argument: Tex Invalidate
|
|
|
|
|
* "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
|
|
|
|
|
*/
|
|
|
|
|
flags |= PIPE_CONTROL_CS_STALL;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER == 8 && (post_sync_flags ||
|
2018-04-19 12:52:51 -07:00
|
|
|
(flags & (PIPE_CONTROL_NOTIFY_ENABLE |
|
|
|
|
|
PIPE_CONTROL_DEPTH_STALL |
|
|
|
|
|
PIPE_CONTROL_RENDER_TARGET_FLUSH |
|
|
|
|
|
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
|
|
|
|
|
PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
|
|
|
|
|
/* Project: BDW / Arguments:
|
|
|
|
|
*
|
|
|
|
|
* - LRI Post Sync Operation [23]
|
|
|
|
|
* - Post Sync Op [15:14]
|
|
|
|
|
* - Notify En [8]
|
|
|
|
|
* - Depth Stall [13]
|
|
|
|
|
* - Render Target Cache Flush [12]
|
|
|
|
|
* - Depth Cache Flush [0]
|
|
|
|
|
* - DC Flush Enable [5]
|
|
|
|
|
*
|
|
|
|
|
* "Requires stall bit ([20] of DW) set for all GPGPU and Media
|
|
|
|
|
* Workloads."
|
|
|
|
|
*/
|
|
|
|
|
flags |= PIPE_CONTROL_CS_STALL;
|
|
|
|
|
|
|
|
|
|
/* Also, from the PIPE_CONTROL instruction table, bit 20:
|
|
|
|
|
*
|
|
|
|
|
* "Project: BDW
|
|
|
|
|
* This bit must be always set when PIPE_CONTROL command is
|
|
|
|
|
* programmed by GPGPU and MEDIA workloads, except for the cases
|
|
|
|
|
* when only Read Only Cache Invalidation bits are set (State
|
|
|
|
|
* Cache Invalidation Enable, Instruction cache Invalidation
|
|
|
|
|
* Enable, Texture Cache Invalidation Enable, Constant Cache
|
|
|
|
|
* Invalidation Enable). This is to WA FFDOP CG issue, this WA
|
|
|
|
|
* need not implemented when FF_DOP_CG is disable via "Fixed
|
|
|
|
|
* Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
|
|
|
|
|
*
|
|
|
|
|
* It sounds like we could avoid CS stalls in some cases, but we
|
|
|
|
|
* don't currently bother. This list isn't exactly the list above,
|
|
|
|
|
* either...
|
|
|
|
|
*/
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* "Stall" workarounds ----------------------------------------------
|
|
|
|
|
* These have to come after the earlier ones because we may have added
|
|
|
|
|
* some additional CS stalls above.
|
|
|
|
|
*/
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
if (GFX_VER < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
|
2018-04-19 12:52:51 -07:00
|
|
|
/* Project: PRE-SKL, VLV, CHV
|
|
|
|
|
*
|
|
|
|
|
* "[All Stepping][All SKUs]:
|
|
|
|
|
*
|
|
|
|
|
* One of the following must also be set:
|
|
|
|
|
*
|
|
|
|
|
* - Render Target Cache Flush Enable ([12] of DW1)
|
|
|
|
|
* - Depth Cache Flush Enable ([0] of DW1)
|
|
|
|
|
* - Stall at Pixel Scoreboard ([1] of DW1)
|
|
|
|
|
* - Depth Stall ([13] of DW1)
|
|
|
|
|
* - Post-Sync Operation ([13] of DW1)
|
|
|
|
|
* - DC Flush Enable ([5] of DW1)"
|
|
|
|
|
*
|
|
|
|
|
* If we don't already have one of those bits set, we choose to add
|
|
|
|
|
* "Stall at Pixel Scoreboard". Some of the other bits require a
|
|
|
|
|
* CS stall as a workaround (see above), which would send us into
|
|
|
|
|
* an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
|
|
|
|
|
* appears to be safe, so we choose that.
|
|
|
|
|
*/
|
|
|
|
|
const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
|
|
|
|
|
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
|
|
|
|
|
PIPE_CONTROL_WRITE_IMMEDIATE |
|
|
|
|
|
PIPE_CONTROL_WRITE_DEPTH_COUNT |
|
|
|
|
|
PIPE_CONTROL_WRITE_TIMESTAMP |
|
|
|
|
|
PIPE_CONTROL_STALL_AT_SCOREBOARD |
|
|
|
|
|
PIPE_CONTROL_DEPTH_STALL |
|
|
|
|
|
PIPE_CONTROL_DATA_CACHE_FLUSH;
|
|
|
|
|
if (!(flags & wa_bits))
|
|
|
|
|
flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-16 10:14:30 -07:00
|
|
|
   if (GFX_VER >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
      /* Wa_1409600907:
       *
       *    "PIPE_CONTROL with Depth Stall Enable bit must be set
       *     with any PIPE_CONTROL with Depth Flush Enable bit set."
       */
      flags |= PIPE_CONTROL_DEPTH_STALL;
   }

   /* Emit --------------------------------------------------------------- */

   if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) {
      fprintf(stderr,
              " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
              (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
              (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
              (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
              (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
              (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
              (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
              (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
              (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
              (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
              (flags & PIPE_CONTROL_TILE_CACHE_FLUSH) ? "Tile " : "",
              (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
              (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
              (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
              (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
              (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
              (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
              (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
                 "SnapRes" : "",
              (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
                 "ISPDis" : "",
              (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
              (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
              (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
              (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
              (flags & PIPE_CONTROL_PSS_STALL_SYNC) ? "PSS " : "",
              imm, reason);
   }

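   /* Update the batch's cache coherency tracking to reflect the flushes and
    * invalidations this PIPE_CONTROL will perform (a summary of what the
    * helper below does; see its definition for the exact bookkeeping).
    */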
   batch_mark_sync_for_pipe_control(batch, flags);

   iris_batch_sync_region_start(batch);

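   /* Only record this PIPE_CONTROL as a "stall" in the u_trace timeline if
    * it actually flushes or invalidates some cache; a pure post-sync write
    * carries no cache bits and is skipped.
    */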
   const bool trace_pc =
      (flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
                PIPE_CONTROL_CACHE_INVALIDATE_BITS)) != 0;

   if (trace_pc)
      trace_intel_begin_stall(&batch->trace, batch);

   iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
#if GFX_VERx10 >= 125
      pc.PSSStallSyncEnable = flags & PIPE_CONTROL_PSS_STALL_SYNC;
#endif
#if GFX_VER >= 12
      pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
#endif
#if GFX_VER >= 11
      pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
#endif
      pc.LRIPostSyncOperation = NoLRIOperation;
      pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
      pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
      pc.StoreDataIndex = 0;
      pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
      pc.GlobalSnapshotCountReset =
         flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
      pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
      pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
      pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
      pc.RenderTargetCacheFlushEnable =
         flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
      pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
      pc.StateCacheInvalidationEnable =
         flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
#if GFX_VER >= 12
      /* Invalidates the L3 cache part in which index & vertex data is loaded
       * when VERTEX_BUFFER_STATE::L3BypassDisable is set.
       */
      pc.L3ReadOnlyCacheInvalidationEnable =
         flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
#endif
      pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
      pc.ConstantCacheInvalidationEnable =
         flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
      pc.PostSyncOperation = flags_to_post_sync_op(flags);
      pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
      pc.InstructionCacheInvalidateEnable =
         flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
      pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
      pc.IndirectStatePointersDisable =
         flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
      pc.TextureCacheInvalidationEnable =
         flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
      pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      pc.ImmediateData = imm;
   }

   if (trace_pc) {
      trace_intel_end_stall(&batch->trace, batch, flags,
                            iris_utrace_pipe_flush_bit_to_ds_stall_flag,
                            reason);
   }

   iris_batch_sync_region_end(batch);
}

#if GFX_VER == 9
/**
 * Preemption on Gfx9 has to be enabled or disabled in various cases.
 *
 * See these workarounds for preemption:
 *  - WaDisableMidObjectPreemptionForGSLineStripAdj
 *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
 *  - WaDisableMidObjectPreemptionForLineLoop
 *  - WA#0798
 *
 * We don't put this in the vtable because it's only used on Gfx9.
 */
void
gfx9_toggle_preemption(struct iris_context *ice,
                       struct iris_batch *batch,
                       const struct pipe_draw_info *draw)
{
   struct iris_genx_state *genx = ice->state.genx;
   bool object_preemption = true;

   /* WaDisableMidObjectPreemptionForGSLineStripAdj
    *
    *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
    *     and GS is enabled."
    */
   if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
       ice->shaders.prog[MESA_SHADER_GEOMETRY])
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
    *
    *    "TriFan miscompare in Execlist Preemption test.  Cut index that is
    *     on a previous context.  End the previous, then resume another
    *     context with a tri-fan or polygon, and the vertex count is
    *     corrupted.  If we preempt again we will cause corruption.
    *
    *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
    */
   if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForLineLoop
    *
    *    "VF Stats Counters Missing a vertex when preemption enabled.
    *
    *     WA: Disable mid-draw preemption when the draw uses a lineloop
    *     topology."
    */
   if (draw->mode == PIPE_PRIM_LINE_LOOP)
      object_preemption = false;

   /* WA#0798
    *
    *    "VF is corrupting GAFS data when preempted on an instance boundary
    *     and replayed with instancing enabled.
    *
    *     WA: Disable preemption when using instancing."
    */
   if (draw->instance_count > 1)
      object_preemption = false;

   if (genx->object_preemption != object_preemption) {
      iris_enable_obj_preemption(batch, object_preemption);
      genx->object_preemption = object_preemption;
   }
}
#endif

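/**
 * Re-initialize generation-specific tracking state that may no longer match
 * what the hardware has programmed, so it gets re-emitted rather than
 * trusted.  This backs the screen vtable's lost_genx_state() hook set up
 * in genX(init_screen_state) below.
 */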
static void
iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
{
   struct iris_genx_state *genx = ice->state.genx;

#if GFX_VERx10 == 120
   genx->depth_reg_mode = IRIS_DEPTH_REG_MODE_UNKNOWN;
#endif

   memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
}

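/**
 * Emit MI_REPORT_PERF_COUNT, which asks the hardware to write a snapshot of
 * the OA performance counters, tagged with \p report_id, at
 * \p offset_in_bytes within \p bo.
 */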
static void
iris_emit_mi_report_perf_count(struct iris_batch *batch,
                               struct iris_bo *bo,
                               uint32_t offset_in_bytes,
                               uint32_t report_id)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
                                   IRIS_DOMAIN_OTHER_WRITE);
      mi_rpc.ReportID = report_id;
   }
   iris_batch_sync_region_end(batch);
}

/**
 * Update the pixel hashing modes that determine the balancing of PS threads
 * across subslices and slices.
 *
 * \param width Width bound of the rendering area (already scaled down if \p
 *              scale is greater than 1).
 * \param height Height bound of the rendering area (already scaled down if \p
 *               scale is greater than 1).
 * \param scale The number of framebuffer samples that could potentially be
 *              affected by an individual channel of the PS thread.  This is
 *              typically one for single-sampled rendering, but for operations
 *              like CCS resolves and fast clears a single PS invocation may
 *              update a huge number of pixels, in which case a finer
 *              balancing is desirable in order to maximally utilize the
 *              bandwidth available.  UINT_MAX can be used as shorthand for
 *              "finest hashing mode available".
 */
void
genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
                        unsigned width, unsigned height, unsigned scale)
{
#if GFX_VER == 9
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   const unsigned slice_hashing[] = {
      /* Because all Gfx9 platforms with more than one slice require
       * three-way subslice hashing, a single "normal" 16x16 slice hashing
       * block is guaranteed to suffer from substantial imbalance, with one
       * subslice receiving twice as much work as the other two in the
       * slice.
       *
       * The performance impact of that would be particularly severe when
       * three-way hashing is also in use for slice balancing (which is the
       * case for all Gfx9 GT4 platforms), because one of the slices
       * receives one every three 16x16 blocks in either direction, which
       * is roughly the periodicity of the underlying subslice imbalance
       * pattern ("roughly" because in reality the hardware's
       * implementation of three-way hashing doesn't do exact modulo 3
       * arithmetic, which somewhat decreases the magnitude of this effect
       * in practice).  This leads to a systematic subslice imbalance
       * within that slice regardless of the size of the primitive.  The
       * 32x32 hashing mode guarantees that the subslice imbalance within a
       * single slice hashing block is minimal, largely eliminating this
       * effect.
       */
      _32x32,
      /* Finest slice hashing mode available. */
      NORMAL
   };
   const unsigned subslice_hashing[] = {
      /* 16x16 would provide a slight cache locality benefit especially
       * visible in the sampler L1 cache efficiency of low-bandwidth
       * non-LLC platforms, but it comes at the cost of greater subslice
       * imbalance for primitives of dimensions approximately intermediate
       * between 16x4 and 16x16.
       */
      _16x4,
      /* Finest subslice hashing mode available. */
      _8x4
   };
   /* Dimensions of the smallest hashing block of a given hashing mode.  If
    * the rendering area is smaller than this there can't possibly be any
    * benefit from switching to this mode, so we optimize out the
    * transition.
    */
   const unsigned min_size[][2] = {
      { 16, 4 },
      {  8, 4 }
   };
   const unsigned idx = scale > 1;

   if (width > min_size[idx][0] || height > min_size[idx][1]) {
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before GT_MODE LRI",
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                 PIPE_CONTROL_CS_STALL,
                                 NULL, 0, 0);

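      /* With the stall above keeping the change from landing mid-draw,
       * reprogram the hashing mode via GT_MODE.  GT_MODE is a masked
       * register, so the *Mask fields select which value fields the write
       * actually updates.
       */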
      iris_emit_reg(batch, GENX(GT_MODE), reg) {
         reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
         reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
         reg.SubsliceHashing = subslice_hashing[idx];
         reg.SubsliceHashingMask = -1;
      };

      ice->state.current_hash_scale = scale;
   }
#endif
}

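/**
 * The pipe->set_frontend_noop() hook.
 *
 * Gallium can ask the driver to turn rendering into a no-op (for example to
 * measure frontend overhead).  If preparing a batch for no-op mode reports
 * that the batch changed, flag all state dirty so it is fully re-emitted.
 */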
static void
iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
   }

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
   }
}

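/**
 * Fill in the screen vtable with this generation's variants of the state
 * upload and emission helpers.  The assert catches a screen being paired
 * with code compiled for a different generation.
 */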
void
genX(init_screen_state)(struct iris_screen *screen)
{
   assert(screen->devinfo.verx10 == GFX_VERx10);
   screen->vtbl.destroy_state = iris_destroy_state;
   screen->vtbl.init_render_context = iris_init_render_context;
   screen->vtbl.init_compute_context = iris_init_compute_context;
   screen->vtbl.upload_render_state = iris_upload_render_state;
   screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
   screen->vtbl.upload_compute_state = iris_upload_compute_state;
   screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
   screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
   screen->vtbl.rebind_buffer = iris_rebind_buffer;
   screen->vtbl.load_register_reg32 = iris_load_register_reg32;
   screen->vtbl.load_register_reg64 = iris_load_register_reg64;
   screen->vtbl.load_register_imm32 = iris_load_register_imm32;
   screen->vtbl.load_register_imm64 = iris_load_register_imm64;
   screen->vtbl.load_register_mem32 = iris_load_register_mem32;
   screen->vtbl.load_register_mem64 = iris_load_register_mem64;
   screen->vtbl.store_register_mem32 = iris_store_register_mem32;
   screen->vtbl.store_register_mem64 = iris_store_register_mem64;
   screen->vtbl.store_data_imm32 = iris_store_data_imm32;
   screen->vtbl.store_data_imm64 = iris_store_data_imm64;
   screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
   screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
   screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
   screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
   screen->vtbl.populate_vs_key = iris_populate_vs_key;
   screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
   screen->vtbl.populate_tes_key = iris_populate_tes_key;
   screen->vtbl.populate_gs_key = iris_populate_gs_key;
   screen->vtbl.populate_fs_key = iris_populate_fs_key;
   screen->vtbl.populate_cs_key = iris_populate_cs_key;
   screen->vtbl.lost_genx_state = iris_lost_genx_state;
}

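/**
 * Install the Gallium pipe_context hooks for state objects and set the
 * initial context defaults: everything starts dirty, a full sample mask,
 * one viewport, empty scissors, and a null surface for unbound textures.
 */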
void
genX(init_state)(struct iris_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   ctx->create_blend_state = iris_create_blend_state;
   ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
   ctx->create_rasterizer_state = iris_create_rasterizer_state;
   ctx->create_sampler_state = iris_create_sampler_state;
   ctx->create_sampler_view = iris_create_sampler_view;
   ctx->create_surface = iris_create_surface;
   ctx->create_vertex_elements_state = iris_create_vertex_elements;
   ctx->bind_blend_state = iris_bind_blend_state;
   ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
   ctx->bind_sampler_states = iris_bind_sampler_states;
   ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
   ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
   ctx->delete_blend_state = iris_delete_state;
   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
   ctx->delete_rasterizer_state = iris_delete_state;
   ctx->delete_sampler_state = iris_delete_state;
   ctx->delete_vertex_elements_state = iris_delete_state;
   ctx->set_blend_color = iris_set_blend_color;
   ctx->set_clip_state = iris_set_clip_state;
   ctx->set_constant_buffer = iris_set_constant_buffer;
   ctx->set_shader_buffers = iris_set_shader_buffers;
   ctx->set_shader_images = iris_set_shader_images;
   ctx->set_sampler_views = iris_set_sampler_views;
   ctx->set_compute_resources = iris_set_compute_resources;
   ctx->set_global_binding = iris_set_global_binding;
   ctx->set_tess_state = iris_set_tess_state;
   ctx->set_patch_vertices = iris_set_patch_vertices;
   ctx->set_framebuffer_state = iris_set_framebuffer_state;
   ctx->set_polygon_stipple = iris_set_polygon_stipple;
   ctx->set_sample_mask = iris_set_sample_mask;
   ctx->set_scissor_states = iris_set_scissor_states;
   ctx->set_stencil_ref = iris_set_stencil_ref;
   ctx->set_vertex_buffers = iris_set_vertex_buffers;
   ctx->set_viewport_states = iris_set_viewport_states;
   ctx->sampler_view_destroy = iris_sampler_view_destroy;
   ctx->surface_destroy = iris_surface_destroy;
   ctx->draw_vbo = iris_draw_vbo;
   ctx->launch_grid = iris_launch_grid;
   ctx->create_stream_output_target = iris_create_stream_output_target;
   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
   ctx->set_stream_output_targets = iris_set_stream_output_targets;
   ctx->set_frontend_noop = iris_set_frontend_noop;

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;

   ice->state.statistics_counters_enabled = true;

   ice->state.sample_mask = 0xffff;
   ice->state.num_viewports = 1;
   ice->state.prim_mode = PIPE_PRIM_MAX;
   ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
   ice->draw.derived_params.drawid = -1;

   /* Make a 1x1x1 null surface for unbound textures */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map,
                       .size = isl_extent3d(1, 1, 1));
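   /* Binding table entries and surface state pointers are programmed
    * relative to Surface State Base Address, so convert the uploader's
    * offset into one relative to that base.
    */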
   ice->state.unbound_tex.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));

   /* Default all scissor rectangles to be empty regions. */
   for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
      ice->state.scissors[i] = (struct pipe_scissor_state) {
         .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
      };
   }
}