/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <time.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/debug.h"
#include "util/ralloc.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_upload_mgr.h"
#include "drm-uapi/i915_drm.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "common/intel_defines.h"
#include "common/intel_sample_positions.h"

/**
 * The pipe->set_debug_callback() driver hook.
 */
static void
iris_set_debug_callback(struct pipe_context *ctx,
                        const struct pipe_debug_callback *cb)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (cb)
      ice->dbg = *cb;
   else
      memset(&ice->dbg, 0, sizeof(ice->dbg));
}

/**
 * Called from the batch module when it detects a GPU hang.
 *
 * In this case, we've lost our GEM context, and can't rely on any existing
 * state on the GPU. We must mark everything dirty and wipe away any saved
 * assumptions about the last known state of the GPU.
 */
void
iris_lost_context_state(struct iris_batch *batch)
{
   struct iris_context *ice = batch->ice;

   if (batch->name == IRIS_BATCH_RENDER) {
      batch->screen->vtbl.init_render_context(batch);
   } else if (batch->name == IRIS_BATCH_COMPUTE) {
      batch->screen->vtbl.init_compute_context(batch);
   } else {
      unreachable("unhandled batch reset");
   }

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;
   ice->state.current_hash_scale = 0;
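   /* Wipe the tracked URB configuration, so the next shader update will
    * reprogram the URB rather than assuming the old partitioning is still
    * in place.
    */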
   memset(&ice->shaders.urb, 0, sizeof(ice->shaders.urb));
   memset(ice->state.last_block, 0, sizeof(ice->state.last_block));
   memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
   batch->last_surface_base_address = ~0ull;
   batch->last_aux_map_state = 0;
   batch->screen->vtbl.lost_genx_state(ice, batch);
}
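
/**
 * The pipe->get_device_reset_status() driver hook.
 *
 * Reports the worst reset status across all of this context's batches,
 * and invokes the device reset callback (if one is installed) when a
 * reset was detected.
 */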
static enum pipe_reset_status
iris_get_device_reset_status(struct pipe_context *ctx)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   enum pipe_reset_status worst_reset = PIPE_NO_RESET;

   /* Check the reset status of each batch's hardware context, and take the
    * worst status (if one was guilty, proclaim guilt).
    */
   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      /* This will also recreate the hardware contexts as necessary, so any
       * future queries will show no resets. We only want to report once.
       */
      enum pipe_reset_status batch_reset =
         iris_batch_check_for_reset(&ice->batches[i]);

      if (batch_reset == PIPE_NO_RESET)
         continue;

      if (worst_reset == PIPE_NO_RESET) {
         worst_reset = batch_reset;
      } else {
         /* GUILTY < INNOCENT < UNKNOWN */
         worst_reset = MIN2(worst_reset, batch_reset);
      }
   }

   if (worst_reset != PIPE_NO_RESET && ice->reset.reset)
      ice->reset.reset(ice->reset.data, worst_reset);

   return worst_reset;
}
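
/**
 * The pipe->set_device_reset_callback() driver hook.
 */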
static void
iris_set_device_reset_callback(struct pipe_context *ctx,
                               const struct pipe_device_reset_callback *cb)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (cb)
      ice->reset = *cb;
   else
      memset(&ice->reset, 0, sizeof(ice->reset));
}
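
/**
 * The pipe->get_sample_position() driver hook.
 *
 * Fills out_value with the x/y offsets of the requested sample, using the
 * fixed Intel sample position tables for the given MSAA sample count.
 */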
static void
iris_get_sample_position(struct pipe_context *ctx,
                         unsigned sample_count,
                         unsigned sample_index,
                         float *out_value)
{
   union {
      struct {
         float x[16];
         float y[16];
      } a;
      struct {
         float  _0XOffset,  _1XOffset,  _2XOffset,  _3XOffset,
                _4XOffset,  _5XOffset,  _6XOffset,  _7XOffset,
                _8XOffset,  _9XOffset, _10XOffset, _11XOffset,
               _12XOffset, _13XOffset, _14XOffset, _15XOffset;
         float  _0YOffset,  _1YOffset,  _2YOffset,  _3YOffset,
                _4YOffset,  _5YOffset,  _6YOffset,  _7YOffset,
                _8YOffset,  _9YOffset, _10YOffset, _11YOffset,
               _12YOffset, _13YOffset, _14YOffset, _15YOffset;
      } v;
   } u;
   switch (sample_count) {
   case 1:  INTEL_SAMPLE_POS_1X(u.v._);  break;
   case 2:  INTEL_SAMPLE_POS_2X(u.v._);  break;
   case 4:  INTEL_SAMPLE_POS_4X(u.v._);  break;
   case 8:  INTEL_SAMPLE_POS_8X(u.v._);  break;
   case 16: INTEL_SAMPLE_POS_16X(u.v._); break;
   default: unreachable("invalid sample count");
   }

   out_value[0] = u.a.x[sample_index];
   out_value[1] = u.a.y[sample_index];
}
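
/*
 * Dirty dma-buf tracking: externally-shareable (dma-buf) resources this
 * context has written are collected in a set, each with a reference held,
 * so that iris_flush_dirty_dmabufs() can flush_resource() them all before
 * their contents are consumed externally.
 */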
static bool
create_dirty_dmabuf_set(struct iris_context *ice)
{
   assert(ice->dirty_dmabufs == NULL);

   ice->dirty_dmabufs = _mesa_pointer_set_create(ice);
   return ice->dirty_dmabufs != NULL;
}
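
/**
 * Add a dma-buf resource to the dirty set, taking a reference that is
 * held until the set is cleared.
 */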
void
iris_mark_dirty_dmabuf(struct iris_context *ice,
                       struct pipe_resource *res)
{
   if (!_mesa_set_search(ice->dirty_dmabufs, res)) {
      _mesa_set_add(ice->dirty_dmabufs, res);
      pipe_reference(NULL, &res->reference);
   }
}
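
/**
 * Release the references held by the dirty dma-buf set and empty it.
 */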
static void
clear_dirty_dmabuf_set(struct iris_context *ice)
{
   set_foreach(ice->dirty_dmabufs, entry) {
      struct pipe_resource *res = (struct pipe_resource *)entry->key;
      if (pipe_reference(&res->reference, NULL))
         res->screen->resource_destroy(res->screen, res);
   }

   _mesa_set_clear(ice->dirty_dmabufs, NULL);
}
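
/**
 * Flush (via pipe->flush_resource()) each resource in the dirty dma-buf
 * set, then drop the tracking.
 */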
void
iris_flush_dirty_dmabufs(struct iris_context *ice)
{
   set_foreach(ice->dirty_dmabufs, entry) {
      struct pipe_resource *res = (struct pipe_resource *)entry->key;
      ice->ctx.flush_resource(&ice->ctx, res);
   }

   clear_dirty_dmabuf_set(ice);
}

/**
 * Destroy a context, freeing any associated memory.
 */
static void
iris_destroy_context(struct pipe_context *ctx)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   if (ctx->stream_uploader)
      u_upload_destroy(ctx->stream_uploader);
   if (ctx->const_uploader)
      u_upload_destroy(ctx->const_uploader);

   clear_dirty_dmabuf_set(ice);

   screen->vtbl.destroy_state(ice);
   iris_destroy_program_cache(ice);
   iris_destroy_border_color_pool(ice);
   if (screen->measure.config)
      iris_destroy_ctx_measure(ice);

   u_upload_destroy(ice->state.surface_uploader);
   u_upload_destroy(ice->state.dynamic_uploader);
   u_upload_destroy(ice->query_buffer_uploader);

   iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]);
   iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]);
   iris_destroy_binder(&ice->state.binder);

   slab_destroy_child(&ice->transfer_pool);
   slab_destroy_child(&ice->transfer_pool_unsync);

   ralloc_free(ice);
}
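
/*
 * Dispatch to the generation-specific (genX) implementation of @func,
 * based on devinfo->verx10 (10x the hardware generation, e.g. 120 for
 * Gfx12.0). For example, genX_call(devinfo, init_state, ice) invokes
 * gfx12_init_state(ice) on a Gfx12.0 part.
 */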
#define genX_call(devinfo, func, ...)             \
   switch ((devinfo)->verx10) {                   \
   case 125:                                      \
      gfx125_##func(__VA_ARGS__);                 \
      break;                                      \
   case 120:                                      \
      gfx12_##func(__VA_ARGS__);                  \
      break;                                      \
   case 110:                                      \
      gfx11_##func(__VA_ARGS__);                  \
      break;                                      \
   case 90:                                       \
      gfx9_##func(__VA_ARGS__);                   \
      break;                                      \
   case 80:                                       \
      gfx8_##func(__VA_ARGS__);                   \
      break;                                      \
   default:                                       \
      unreachable("Unknown hardware generation"); \
   }

/**
 * Create a context.
 *
 * This is where each context begins.
 */
struct pipe_context *
iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct iris_screen *screen = (struct iris_screen*)pscreen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_context *ice = rzalloc(NULL, struct iris_context);

   if (!ice)
      return NULL;

   struct pipe_context *ctx = &ice->ctx;

   ctx->screen = pscreen;
   ctx->priv = priv;

   ctx->stream_uploader = u_upload_create_default(ctx);
   if (!ctx->stream_uploader) {
      free(ctx);
      return NULL;
   }
   ctx->const_uploader = u_upload_create(ctx, 1024 * 1024,
                                         PIPE_BIND_CONSTANT_BUFFER,
                                         PIPE_USAGE_IMMUTABLE, 0);
   if (!ctx->const_uploader) {
      u_upload_destroy(ctx->stream_uploader);
      free(ctx);
      return NULL;
   }

   if (!create_dirty_dmabuf_set(ice)) {
      ralloc_free(ice);
      return NULL;
   }

   ctx->destroy = iris_destroy_context;
   ctx->set_debug_callback = iris_set_debug_callback;
   ctx->set_device_reset_callback = iris_set_device_reset_callback;
   ctx->get_device_reset_status = iris_get_device_reset_status;
   ctx->get_sample_position = iris_get_sample_position;

   iris_init_context_fence_functions(ctx);
   iris_init_blit_functions(ctx);
   iris_init_clear_functions(ctx);
   iris_init_program_functions(ctx);
   iris_init_resource_functions(ctx);
   iris_init_flush_functions(ctx);
   iris_init_perfquery_functions(ctx);

   iris_init_program_cache(ice);
   iris_init_border_color_pool(ice);
   iris_init_binder(ice);

   slab_create_child(&ice->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ice->transfer_pool_unsync, &screen->transfer_pool);

   ice->state.surface_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_SURFACE_MEMZONE);
   ice->state.dynamic_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE);

   ice->query_buffer_uploader =
      u_upload_create(ctx, 16 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING,
                      0);

   genX_call(devinfo, init_state, ice);
   genX_call(devinfo, init_blorp, ice);
   genX_call(devinfo, init_query, ice);

   int priority = 0;
   if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      priority = INTEL_CONTEXT_HIGH_PRIORITY;
   if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      priority = INTEL_CONTEXT_LOW_PRIORITY;

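   /* Recording the sizes of state uploads lets the INTEL_DEBUG=bat decoder
    * print variable-length packets (viewports, binding tables, ...) with
    * their actual lengths instead of guessing.
    */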
   if (INTEL_DEBUG & DEBUG_BATCH)
      ice->state.sizes = _mesa_hash_table_u64_create(ice);

   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      iris_init_batch(ice, (enum iris_batch_name) i, priority);
   }

   screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
   screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return ctx;

   /* Clover doesn't support u_threaded_context */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return ctx;

   return threaded_context_create(ctx, &screen->transfer_pool,
                                  iris_replace_buffer_storage,
                                  NULL, /* TODO: asynchronous flushes? */
                                  &ice->thrctx);
}