/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <time.h>

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_debug.h"
#include "util/ralloc.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_upload_mgr.h"

#include "iris_context.h"
#include "iris_perf.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "iris_utrace.h"

#include "common/intel_sample_positions.h"

/**
 * The pipe->set_debug_callback() driver hook.
 */
static void
iris_set_debug_callback(struct pipe_context *ctx,
                        const struct util_debug_callback *cb)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   util_queue_finish(&screen->shader_compiler_queue);

   if (cb)
      ice->dbg = *cb;
   else
      memset(&ice->dbg, 0, sizeof(ice->dbg));
}

/**
 * Called from the batch module when it detects a GPU hang.
 *
 * In this case, we've lost our GEM context, and can't rely on any existing
 * state on the GPU.  We must mark everything dirty and wipe away any saved
 * assumptions about the last known state of the GPU.
 */
void
iris_lost_context_state(struct iris_batch *batch)
{
   struct iris_context *ice = batch->ice;

   if (batch->name == IRIS_BATCH_RENDER) {
      batch->screen->vtbl.init_render_context(batch);
   } else if (batch->name == IRIS_BATCH_COMPUTE) {
      batch->screen->vtbl.init_compute_context(batch);
   } else if (batch->name == IRIS_BATCH_BLITTER) {
      /* No state to set up */
   } else {
      unreachable("unhandled batch reset");
   }

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;
   ice->state.current_hash_scale = 0;
   memset(&ice->shaders.urb, 0, sizeof(ice->shaders.urb));
   memset(ice->state.last_block, 0, sizeof(ice->state.last_block));
   memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
   ice->state.last_grid_dim = 0;
   batch->last_binder_address = ~0ull;
   batch->last_aux_map_state = 0;
   batch->screen->vtbl.lost_genx_state(ice, batch);
}

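/**
 * The pipe->get_device_reset_status() driver hook: poll each batch's
 * kernel context for hangs and report the most severe status seen.
 */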
static enum pipe_reset_status
iris_get_device_reset_status(struct pipe_context *ctx)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   enum pipe_reset_status worst_reset = PIPE_NO_RESET;

   /* Check the reset status of each batch's hardware context, and take the
    * worst status (if one was guilty, proclaim guilt).
    */
   iris_foreach_batch(ice, batch) {
      enum pipe_reset_status batch_reset =
         iris_batch_check_for_reset(batch);

      if (batch_reset == PIPE_NO_RESET)
         continue;

      if (worst_reset == PIPE_NO_RESET) {
         worst_reset = batch_reset;
      } else {
         /* GUILTY < INNOCENT < UNKNOWN */
         worst_reset = MIN2(worst_reset, batch_reset);
      }
   }

   if (worst_reset != PIPE_NO_RESET && ice->reset.reset)
      ice->reset.reset(ice->reset.data, worst_reset);

   return worst_reset;
}

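/**
 * The pipe->set_device_reset_callback() driver hook.
 */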
static void
iris_set_device_reset_callback(struct pipe_context *ctx,
                               const struct pipe_device_reset_callback *cb)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (cb)
      ice->reset = *cb;
   else
      memset(&ice->reset, 0, sizeof(ice->reset));
}

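/**
 * The pipe->get_sample_position() driver hook.
 *
 * The INTEL_SAMPLE_POS_*X() macros assign the standard sample positions to
 * individually named _<i>XOffset/_<i>YOffset fields of u.v; the union
 * overlays those onto the plain x[16]/y[16] arrays of u.a, so the requested
 * sample can simply be indexed.
 */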
static void
iris_get_sample_position(struct pipe_context *ctx,
                         unsigned sample_count,
                         unsigned sample_index,
                         float *out_value)
{
   union {
      struct {
         float x[16];
         float y[16];
      } a;
      struct {
         float _0XOffset, _1XOffset, _2XOffset, _3XOffset,
               _4XOffset, _5XOffset, _6XOffset, _7XOffset,
               _8XOffset, _9XOffset, _10XOffset, _11XOffset,
               _12XOffset, _13XOffset, _14XOffset, _15XOffset;
         float _0YOffset, _1YOffset, _2YOffset, _3YOffset,
               _4YOffset, _5YOffset, _6YOffset, _7YOffset,
               _8YOffset, _9YOffset, _10YOffset, _11YOffset,
               _12YOffset, _13YOffset, _14YOffset, _15YOffset;
      } v;
   } u;

   switch (sample_count) {
   case 1:  INTEL_SAMPLE_POS_1X(u.v._);  break;
   case 2:  INTEL_SAMPLE_POS_2X(u.v._);  break;
   case 4:  INTEL_SAMPLE_POS_4X(u.v._);  break;
   case 8:  INTEL_SAMPLE_POS_8X(u.v._);  break;
   case 16: INTEL_SAMPLE_POS_16X(u.v._); break;
   default: unreachable("invalid sample count");
   }

   out_value[0] = u.a.x[sample_index];
   out_value[1] = u.a.y[sample_index];
}

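/* Dirty dma-buf tracking: resources shared externally via dma-buf that this
 * context has written are collected in a pointer set (holding a reference
 * on each), so that pending work can be flushed with ctx->flush_resource()
 * before an external consumer reads them.
 */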
static bool
create_dirty_dmabuf_set(struct iris_context *ice)
{
   assert(ice->dirty_dmabufs == NULL);

   ice->dirty_dmabufs = _mesa_pointer_set_create(ice);
   return ice->dirty_dmabufs != NULL;
}

void
iris_mark_dirty_dmabuf(struct iris_context *ice,
                       struct pipe_resource *res)
{
   if (!_mesa_set_search(ice->dirty_dmabufs, res)) {
      _mesa_set_add(ice->dirty_dmabufs, res);
      pipe_reference(NULL, &res->reference);
   }
}

static void
clear_dirty_dmabuf_set(struct iris_context *ice)
{
   set_foreach(ice->dirty_dmabufs, entry) {
      struct pipe_resource *res = (struct pipe_resource *)entry->key;
      if (pipe_reference(&res->reference, NULL))
         res->screen->resource_destroy(res->screen, res);
   }

   _mesa_set_clear(ice->dirty_dmabufs, NULL);
}

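/**
 * Flush any pending work for all dirty dma-bufs, then drop the tracking
 * set's references to them.
 */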
void
iris_flush_dirty_dmabufs(struct iris_context *ice)
{
   set_foreach(ice->dirty_dmabufs, entry) {
      struct pipe_resource *res = (struct pipe_resource *)entry->key;
      ice->ctx.flush_resource(&ice->ctx, res);
   }

   clear_dirty_dmabuf_set(ice);
}

/**
 * Destroy a context, freeing any associated memory.
 */
void
iris_destroy_context(struct pipe_context *ctx)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   blorp_finish(&ice->blorp);

   intel_perf_free_context(ice->perf_ctx);

   if (ctx->stream_uploader)
      u_upload_destroy(ctx->stream_uploader);
   if (ctx->const_uploader)
      u_upload_destroy(ctx->const_uploader);

   clear_dirty_dmabuf_set(ice);

   screen->vtbl.destroy_state(ice);

   for (unsigned i = 0; i < ARRAY_SIZE(ice->shaders.scratch_surfs); i++)
      pipe_resource_reference(&ice->shaders.scratch_surfs[i].res, NULL);

   for (unsigned i = 0; i < ARRAY_SIZE(ice->shaders.scratch_bos); i++) {
      for (unsigned j = 0; j < ARRAY_SIZE(ice->shaders.scratch_bos[i]); j++)
         iris_bo_unreference(ice->shaders.scratch_bos[i][j]);
   }

   iris_destroy_program_cache(ice);
   if (screen->measure.config)
      iris_destroy_ctx_measure(ice);

   u_upload_destroy(ice->state.surface_uploader);
   u_upload_destroy(ice->state.scratch_surface_uploader);
   u_upload_destroy(ice->state.dynamic_uploader);
   u_upload_destroy(ice->query_buffer_uploader);

   iris_destroy_batches(ice);
   iris_destroy_binder(&ice->state.binder);
   iris_bo_unreference(ice->draw.generation.ring_bo);

   iris_utrace_fini(ice);

   slab_destroy_child(&ice->transfer_pool);
   slab_destroy_child(&ice->transfer_pool_unsync);

   ralloc_free(ice);
}

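/* Dispatch to the Gfx-version-specific implementation of `func` (gfx8_*
 * through gfx30_*), selected by devinfo->verx10: the graphics version
 * times ten, so 125 means Gfx12.5.
 */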
#define genX_call(devinfo, func, ...)             \
   switch ((devinfo)->verx10) {                   \
   case 300:                                      \
      gfx30_##func(__VA_ARGS__);                  \
      break;                                      \
   case 200:                                      \
      gfx20_##func(__VA_ARGS__);                  \
      break;                                      \
   case 125:                                      \
      gfx125_##func(__VA_ARGS__);                 \
      break;                                      \
   case 120:                                      \
      gfx12_##func(__VA_ARGS__);                  \
      break;                                      \
   case 110:                                      \
      gfx11_##func(__VA_ARGS__);                  \
      break;                                      \
   case 90:                                       \
      gfx9_##func(__VA_ARGS__);                   \
      break;                                      \
   case 80:                                       \
      gfx8_##func(__VA_ARGS__);                   \
      break;                                      \
   default:                                       \
      unreachable("Unknown hardware generation"); \
   }

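/* Builds without the ELK compiler backend have no Gfx8 support, so the real
 * gfx8_* entrypoints don't exist; stub them out so the `case 80:` arm of
 * genX_call above still compiles.  They should never be reached.
 */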
#ifndef INTEL_USE_ELK
static inline void gfx8_init_state(struct iris_context *ice) { unreachable("no elk support"); }
static inline void gfx8_init_blorp(struct iris_context *ice) { unreachable("no elk support"); }
static inline void gfx8_init_query(struct iris_context *ice) { unreachable("no elk support"); }
#endif

/**
 * Create a context.
 *
 * This is where each context begins.
 */
struct pipe_context *
iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct iris_screen *screen = (struct iris_screen*)pscreen;
   const struct intel_device_info *devinfo = screen->devinfo;
   struct iris_context *ice = rzalloc(NULL, struct iris_context);

   if (!ice)
      return NULL;

   struct pipe_context *ctx = &ice->ctx;

   ctx->screen = pscreen;
   ctx->priv = priv;

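   /* stream_uploader feeds transient vertex/index/constant data, while
    * const_uploader handles longer-lived constant buffers; judging by the
    * flags, the latter lives in device-local memory (PIPE_USAGE_IMMUTABLE
    * with IRIS_RESOURCE_FLAG_DEVICE_MEM).
    */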
   ctx->stream_uploader = u_upload_create(ctx, 1024 * 1024 * 2,
                                          PIPE_BIND_VERTEX_BUFFER |
                                          PIPE_BIND_INDEX_BUFFER |
                                          PIPE_BIND_CONSTANT_BUFFER,
                                          PIPE_USAGE_STREAM, 0);
   if (!ctx->stream_uploader) {
      ralloc_free(ice);
      return NULL;
   }
   ctx->const_uploader = u_upload_create(ctx, 1024 * 1024,
                                         PIPE_BIND_CONSTANT_BUFFER,
                                         PIPE_USAGE_IMMUTABLE,
                                         IRIS_RESOURCE_FLAG_DEVICE_MEM);
   if (!ctx->const_uploader) {
      u_upload_destroy(ctx->stream_uploader);
      ralloc_free(ice);
      return NULL;
   }

   if (!create_dirty_dmabuf_set(ice)) {
      ralloc_free(ice);
      return NULL;
   }

   ctx->destroy = iris_destroy_context;
   ctx->set_debug_callback = iris_set_debug_callback;
   ctx->set_device_reset_callback = iris_set_device_reset_callback;
   ctx->get_device_reset_status = iris_get_device_reset_status;
   ctx->get_sample_position = iris_get_sample_position;

   iris_init_context_fence_functions(ctx);
   iris_init_blit_functions(ctx);
   iris_init_clear_functions(ctx);
   iris_init_program_functions(ctx);
   iris_init_resource_functions(ctx);
   iris_init_flush_functions(ctx);
   iris_init_perfquery_functions(ctx);

   iris_init_program_cache(ice);
   iris_init_binder(ice);

   slab_create_child(&ice->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ice->transfer_pool_unsync, &screen->transfer_pool);

   ice->state.surface_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_SURFACE_MEMZONE |
                      IRIS_RESOURCE_FLAG_DEVICE_MEM);
   ice->state.scratch_surface_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_SCRATCH_MEMZONE |
                      IRIS_RESOURCE_FLAG_DEVICE_MEM);
   ice->state.dynamic_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE |
                      IRIS_RESOURCE_FLAG_DEVICE_MEM);

   ice->query_buffer_uploader =
      u_upload_create(ctx, 16 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING,
                      0);

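   /* Run the per-generation initialization: state upload, BLORP, and query
    * support (dispatched via the genX_call macro above).
    */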
   genX_call(devinfo, init_state, ice);
   genX_call(devinfo, init_blorp, ice);
   genX_call(devinfo, init_query, ice);

   if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      ice->priority = IRIS_CONTEXT_HIGH_PRIORITY;
   if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      ice->priority = IRIS_CONTEXT_LOW_PRIORITY;
   if (flags & PIPE_CONTEXT_PROTECTED)
      ice->protected = true;

   if (INTEL_DEBUG(DEBUG_BATCH))
      ice->state.sizes = _mesa_hash_table_u64_create(ice);

   /* Do this before initializing the batches */
   iris_utrace_init(ice);

   iris_init_batches(ice);

   screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
   screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);
   screen->vtbl.init_copy_context(&ice->batches[IRIS_BATCH_BLITTER]);

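   /* Unless the frontend asked for a threaded context, hand back the raw
    * context; otherwise wrap it in u_threaded_context, which offloads
    * driver work to a separate thread.
    */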
   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return ctx;

   /* Clover doesn't support u_threaded_context */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return ctx;

   return threaded_context_create(ctx, &screen->transfer_pool,
                                  iris_replace_buffer_storage,
                                  &(struct threaded_context_options){
                                     .unsynchronized_get_device_reset_status = true,
                                  },
                                  &ice->thrctx);
}