ilo: rework ilo_texture
Use ilo_buffer for buffer resources and ilo_texture for texture resources. The separation necessitates a major cleanup.
parent 768296dd05
commit 176ad54c04
5 changed files with 1075 additions and 814 deletions
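In short, buffer resources (PIPE_BUFFER) now get their own struct ilo_buffer, while struct ilo_texture is reserved for actual textures, and the inline cast helpers return NULL when handed the wrong kind of resource. A minimal sketch of the resulting pattern, condensed from the ilo_resource.h and transfer hunks below (it leans on Mesa/Gallium types such as struct pipe_resource and struct intel_bo that are declared elsewhere in the tree):

   /* cast helpers: each returns NULL for the other kind of resource */
   static inline struct ilo_buffer *
   ilo_buffer(struct pipe_resource *res)
   {
      return (struct ilo_buffer *)
         ((res && res->target == PIPE_BUFFER) ? res : NULL);
   }

   static inline struct ilo_texture *
   ilo_texture(struct pipe_resource *res)
   {
      return (struct ilo_texture *)
         ((res && res->target != PIPE_BUFFER) ? res : NULL);
   }

   /* callers dispatch on the resource target, e.g. in the transfer code */
   if (res->target == PIPE_BUFFER)
      success = buf_map(ilo, xfer);
   else
      success = tex_map(ilo, xfer);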
@@ -757,15 +757,15 @@ gen6_emit_3DSTATE_VERTEX_BUFFERS(const struct ilo_dev_info *dev,

/* use null vb if there is no buffer or the stride is out of range */
if (vb->buffer && vb->stride <= 2048) {
const struct ilo_texture *tex = ilo_texture(vb->buffer);
const struct ilo_buffer *buf = ilo_buffer(vb->buffer);
const uint32_t start_offset = vb->buffer_offset;
const uint32_t end_offset = tex->bo->get_size(tex->bo) - 1;
const uint32_t end_offset = buf->bo->get_size(buf->bo) - 1;

dw |= vb->stride << BRW_VB0_PITCH_SHIFT;

ilo_cp_write(cp, dw);
ilo_cp_write_bo(cp, start_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write_bo(cp, end_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write_bo(cp, start_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write_bo(cp, end_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write(cp, instance_divisor);
}
else {
@@ -925,13 +925,13 @@ gen6_emit_3DSTATE_INDEX_BUFFER(const struct ilo_dev_info *dev,
{
const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x0a);
const uint8_t cmd_len = 3;
const struct ilo_texture *tex = ilo_texture(ib->buffer);
const struct ilo_buffer *buf = ilo_buffer(ib->buffer);
uint32_t start_offset, end_offset;
int format;

ILO_GPE_VALID_GEN(dev, 6, 7);

if (!tex)
if (!buf)
return;

format = gen6_translate_index_size(ib->index_size);
@@ -945,7 +945,7 @@ gen6_emit_3DSTATE_INDEX_BUFFER(const struct ilo_dev_info *dev,
}

/* end_offset must also be aligned */
end_offset = tex->bo->get_size(tex->bo);
end_offset = buf->bo->get_size(buf->bo);
end_offset -= (end_offset % ib->index_size);
/* it is inclusive */
end_offset -= 1;
@@ -954,8 +954,8 @@ gen6_emit_3DSTATE_INDEX_BUFFER(const struct ilo_dev_info *dev,
ilo_cp_write(cp, cmd | (cmd_len - 2) |
((enable_cut_index) ? BRW_CUT_INDEX_ENABLE : 0) |
format << 8);
ilo_cp_write_bo(cp, start_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write_bo(cp, end_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write_bo(cp, start_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write_bo(cp, end_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_end(cp);
}

@@ -3569,7 +3569,7 @@ gen6_fill_null_SURFACE_STATE(const struct ilo_dev_info *dev,

static void
gen6_fill_buffer_SURFACE_STATE(const struct ilo_dev_info *dev,
const struct ilo_texture *tex,
const struct ilo_buffer *buf,
unsigned offset, unsigned size,
unsigned struct_size,
enum pipe_format elem_format,
@@ -3629,14 +3629,6 @@ gen6_fill_buffer_SURFACE_STATE(const struct ilo_dev_info *dev,
*/
pitch = struct_size;

/*
* From the Sandy Bridge PRM, volume 4 part 1, page 82:
*
* "If Surface Type is SURFTYPE_BUFFER, this field (Tiled Surface) must
* be false (buffers are supported only in linear memory)"
*/
assert(tex->tiling == INTEL_TILING_NONE);

pitch--;
num_entries--;
/* bits [6:0] */
@@ -3939,17 +3931,17 @@ gen6_emit_cbuf_SURFACE_STATE(const struct ilo_dev_info *dev,
struct ilo_cp *cp)
{
const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
struct ilo_texture *tex = ilo_texture(cbuf->buffer);
struct ilo_buffer *buf = ilo_buffer(cbuf->buffer);
uint32_t dw[6];

ILO_GPE_VALID_GEN(dev, 6, 6);

gen6_fill_buffer_SURFACE_STATE(dev, tex,
gen6_fill_buffer_SURFACE_STATE(dev, buf,
cbuf->buffer_offset, cbuf->buffer_size,
util_format_get_blocksize(elem_format), elem_format,
false, false, dw, Elements(dw));

return gen6_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
return gen6_emit_SURFACE_STATE(dev, buf->bo, false, dw, Elements(dw), cp);
}

static uint32_t
@@ -3959,7 +3951,7 @@ gen6_emit_so_SURFACE_STATE(const struct ilo_dev_info *dev,
int so_index,
struct ilo_cp *cp)
{
struct ilo_texture *tex = ilo_texture(so->buffer);
struct ilo_buffer *buf = ilo_buffer(so->buffer);
unsigned bo_offset, struct_size;
enum pipe_format elem_format;
uint32_t dw[6];
@@ -3988,10 +3980,10 @@ gen6_emit_so_SURFACE_STATE(const struct ilo_dev_info *dev,
break;
}

gen6_fill_buffer_SURFACE_STATE(dev, tex, bo_offset, so->buffer_size,
gen6_fill_buffer_SURFACE_STATE(dev, buf, bo_offset, so->buffer_size,
struct_size, elem_format, false, true, dw, Elements(dw));

return gen6_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
return gen6_emit_SURFACE_STATE(dev, buf->bo, false, dw, Elements(dw), cp);
}

static uint32_t
@@ -1130,7 +1130,7 @@ gen7_emit_3DSTATE_SO_BUFFER(const struct ilo_dev_info *dev,
{
const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x18);
const uint8_t cmd_len = 4;
struct ilo_texture *tex;
struct ilo_buffer *buf;
int end;

ILO_GPE_VALID_GEN(dev, 7, 7);
@@ -1145,7 +1145,7 @@ gen7_emit_3DSTATE_SO_BUFFER(const struct ilo_dev_info *dev,
return;
}

tex = ilo_texture(so_target->buffer);
buf = ilo_buffer(so_target->buffer);

/* DWord-aligned */
assert(stride % 4 == 0 && base % 4 == 0);
@@ -1159,8 +1159,8 @@ gen7_emit_3DSTATE_SO_BUFFER(const struct ilo_dev_info *dev,
ilo_cp_write(cp, cmd | (cmd_len - 2));
ilo_cp_write(cp, index << SO_BUFFER_INDEX_SHIFT |
stride);
ilo_cp_write_bo(cp, base, tex->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_write_bo(cp, end, tex->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_write_bo(cp, base, buf->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_write_bo(cp, end, buf->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_end(cp);
}

@@ -1296,7 +1296,7 @@ gen7_fill_null_SURFACE_STATE(const struct ilo_dev_info *dev,

static void
gen7_fill_buffer_SURFACE_STATE(const struct ilo_dev_info *dev,
const struct ilo_texture *tex,
const struct ilo_buffer *buf,
unsigned offset, unsigned size,
unsigned struct_size,
enum pipe_format elem_format,
@@ -1374,14 +1374,6 @@ gen7_fill_buffer_SURFACE_STATE(const struct ilo_dev_info *dev,

pitch = struct_size;

/*
* From the Ivy Bridge PRM, volume 4 part 1, page 65:
*
* "If Surface Type is SURFTYPE_BUFFER, this field (Tiled Surface) must
* be false (because buffers are supported only in linear memory)."
*/
assert(tex->tiling == INTEL_TILING_NONE);

pitch--;
num_entries--;
/* bits [6:0] */
@@ -1722,17 +1714,17 @@ gen7_emit_cbuf_SURFACE_STATE(const struct ilo_dev_info *dev,
struct ilo_cp *cp)
{
const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
struct ilo_texture *tex = ilo_texture(cbuf->buffer);
struct ilo_buffer *buf = ilo_buffer(cbuf->buffer);
uint32_t dw[8];

ILO_GPE_VALID_GEN(dev, 7, 7);

gen7_fill_buffer_SURFACE_STATE(dev, tex,
gen7_fill_buffer_SURFACE_STATE(dev, buf,
cbuf->buffer_offset, cbuf->buffer_size,
util_format_get_blocksize(elem_format), elem_format,
false, false, dw, Elements(dw));

return gen7_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
return gen7_emit_SURFACE_STATE(dev, buf->bo, false, dw, Elements(dw), cp);
}

static uint32_t
File diff suppressed because it is too large
@@ -33,19 +33,20 @@
#include "ilo_common.h"

struct ilo_screen;
struct winsys_handle;

/*
* TODO we should have
*
* ilo_resource, inherited by
* - ilo_buffer
* - ilo_texture
* - ilo_global_binding
*/
struct ilo_buffer {
struct pipe_resource base;

struct intel_bo *bo;
unsigned bo_size;
unsigned bo_flags;
};

struct ilo_texture {
struct pipe_resource base;
struct winsys_handle *handle;

bool imported;
unsigned bo_flags;

enum pipe_format bo_format;
struct intel_bo *bo;
@@ -73,21 +74,32 @@ struct ilo_texture {
bool interleaved;

/* 2D offsets into a layer/slice/face */
struct {
struct ilo_texture_slice {
unsigned x;
unsigned y;
} *slice_offsets[PIPE_MAX_TEXTURE_LEVELS];
};

static inline struct ilo_buffer *
ilo_buffer(struct pipe_resource *res)
{
return (struct ilo_buffer *)
((res && res->target == PIPE_BUFFER) ? res : NULL);
}

static inline struct ilo_texture *
ilo_texture(struct pipe_resource *res)
{
return (struct ilo_texture *) res;
return (struct ilo_texture *)
((res && res->target != PIPE_BUFFER) ? res : NULL);
}

void
ilo_init_resource_functions(struct ilo_screen *is);

bool
ilo_buffer_alloc_bo(struct ilo_buffer *buf);

bool
ilo_texture_alloc_bo(struct ilo_texture *tex);
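The TODO at the top of ilo_resource.h hints at a common base struct that ilo_buffer and ilo_texture would both inherit from. That base does not exist in this commit; a hypothetical sketch of what the TODO describes (field names chosen for illustration, using the same Mesa types as above) might look like:

   /* Hypothetical layout for the TODO above; not part of this commit. */
   struct ilo_resource {
      struct pipe_resource base;   /* common Gallium resource header */
      struct intel_bo *bo;         /* backing buffer object */
      unsigned bo_flags;
   };

   struct ilo_buffer {
      struct ilo_resource res;     /* "inherits" by embedding the base */
      unsigned bo_size;
   };

   struct ilo_texture {
      struct ilo_resource res;
      enum pipe_format bo_format;
      /* tiling, slice offsets, etc. */
   };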
@@ -54,88 +54,109 @@ ilo_transfer(struct pipe_transfer *transfer)
return (struct ilo_transfer *) transfer;
}

static void
ilo_transfer_inline_write(struct pipe_context *pipe,
struct pipe_resource *res,
unsigned level,
unsigned usage,
const struct pipe_box *box,
const void *data,
unsigned stride,
unsigned layer_stride)
/**
* Choose the best mapping method, depending on the transfer usage and whether
* the bo is busy.
*/
static bool
transfer_choose_method(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
struct ilo_context *ilo = ilo_context(pipe);
struct ilo_texture *tex = ilo_texture(res);
int offset, size;
bool will_be_busy;
struct pipe_resource *res = xfer->base.resource;
struct ilo_texture *tex;
struct ilo_buffer *buf;
struct intel_bo *bo;
bool will_be_busy, will_stall;

/*
* Fall back to map(), memcpy(), and unmap(). We use this path for
* unsynchronized write, as the buffer is likely to be busy and pwrite()
* will stall.
*/
if (unlikely(tex->base.target != PIPE_BUFFER) ||
(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
u_default_transfer_inline_write(pipe, res,
level, usage, box, data, stride, layer_stride);
if (res->target == PIPE_BUFFER) {
tex = NULL;

return;
buf = ilo_buffer(res);
bo = buf->bo;
}
else {
buf = NULL;

tex = ilo_texture(res);
bo = tex->bo;

/* need to convert on-the-fly */
if (tex->bo_format != tex->base.format &&
!(xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
xfer->method = ILO_TRANSFER_MAP_STAGING_SYS;

return true;
}
}

/*
* XXX With hardware context support, the bo may be needed by GPU without
* being referenced by ilo->cp->bo. We have to flush unconditionally, and
* that is bad.
*/
if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);
xfer->method = ILO_TRANSFER_MAP_DIRECT;

will_be_busy = ilo->cp->bo->references(ilo->cp->bo, tex->bo);
/* unsynchronized map does not stall */
if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
return true;

/* see if we can avoid stalling */
if (will_be_busy || intel_bo_is_busy(tex->bo)) {
bool will_stall = true;
will_be_busy = ilo->cp->bo->references(ilo->cp->bo, bo);
if (!will_be_busy) {
/*
* XXX With hardware context support, the bo may be needed by GPU
* without being referenced by ilo->cp->bo. We have to flush
* unconditionally, and that is bad.
*/
if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);

if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* old data not needed so discard the old bo to avoid stalling */
if (ilo_texture_alloc_bo(tex))
will_stall = false;
}
else {
/*
* We could allocate a temporary bo to hold the data and emit
* pipelined copy blit to move them to tex->bo. But for now, do
* nothing.
*/
}
if (!intel_bo_is_busy(bo))
return true;
}

/* flush to make bo busy (so that pwrite() stalls as it should be) */
if (will_stall && will_be_busy)
/* bo is busy and mapping it will stall */
will_stall = true;

if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
/* nothing we can do */
}
else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* discard old bo and allocate a new one for mapping */
if ((tex && ilo_texture_alloc_bo(tex)) ||
(buf && ilo_buffer_alloc_bo(buf)))
will_stall = false;
}
else if (xfer->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
/*
* We could allocate and return a system buffer here. When a region of
* the buffer is explicitly flushed, we pwrite() the region to a
* temporary bo and emit pipelined copy blit.
*
* For now, do nothing.
*/
}
else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_RANGE) {
/*
* We could allocate a temporary bo for mapping, and emit pipelined copy
* blit upon unmapping.
*
* For now, do nothing.
*/
}

if (will_stall) {
if (xfer->base.usage & PIPE_TRANSFER_DONTBLOCK)
return false;

/* flush to make bo busy (so that map() stalls as it should be) */
if (will_be_busy)
ilo_cp_flush(ilo->cp);
}

/* for PIPE_BUFFERs, conversion should not be needed */
assert(tex->bo_format == tex->base.format);

/* they should specify just an offset and a size */
assert(level == 0);
assert(box->y == 0);
assert(box->z == 0);
assert(box->height == 1);
assert(box->depth == 1);
offset = box->x;
size = box->width;

tex->bo->pwrite(tex->bo, offset, size, data);
return true;
}

static void
transfer_unmap_sys_convert(enum pipe_format dst_fmt,
const struct pipe_transfer *dst_xfer,
void *dst,
enum pipe_format src_fmt,
const struct pipe_transfer *src_xfer,
const void *src)
tex_unmap_sys_convert(enum pipe_format dst_fmt,
const struct pipe_transfer *dst_xfer,
void *dst,
enum pipe_format src_fmt,
const struct pipe_transfer *src_xfer,
const void *src)
{
int i;
@@ -159,9 +180,9 @@ transfer_unmap_sys_convert(enum pipe_format dst_fmt,
}

static void
transfer_unmap_sys(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
tex_unmap_sys(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
const void *src = xfer->ptr;
struct pipe_transfer *dst_xfer;
@@ -180,7 +201,7 @@ transfer_unmap_sys(struct ilo_context *ilo,
}

if (likely(tex->bo_format != tex->base.format)) {
transfer_unmap_sys_convert(tex->bo_format, dst_xfer, dst,
tex_unmap_sys_convert(tex->bo_format, dst_xfer, dst,
tex->base.format, &xfer->base, src);
}
else {
@@ -195,9 +216,9 @@ transfer_unmap_sys(struct ilo_context *ilo,
}

static bool
transfer_map_sys(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
tex_map_sys(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
const struct pipe_box *box = &xfer->base.box;
const size_t stride = util_format_get_stride(tex->base.format, box->width);
@@ -232,19 +253,19 @@ transfer_map_sys(struct ilo_context *ilo,
}

static void
transfer_unmap_direct(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
tex_unmap_direct(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
tex->bo->unmap(tex->bo);
}

static bool
transfer_map_direct(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
tex_map_direct(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
int x, y, err;
int err, x, y;

if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
err = tex->bo->map_unsynchronized(tex->bo);
@@ -294,86 +315,135 @@ transfer_map_direct(struct ilo_context *ilo,
return true;
}

/**
* Choose the best mapping method, depending on the transfer usage and whether
* the bo is busy.
*/
static bool
transfer_map_choose_method(struct ilo_context *ilo,
struct ilo_texture *tex,
struct ilo_transfer *xfer)
tex_map(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
bool will_be_busy, will_stall;
struct ilo_texture *tex = ilo_texture(xfer->base.resource);
bool success;

/* need to convert on-the-fly */
if (tex->bo_format != tex->base.format &&
!(xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
xfer->method = ILO_TRANSFER_MAP_STAGING_SYS;
success = transfer_choose_method(ilo, xfer);
if (!success)
return false;

return true;
switch (xfer->method) {
case ILO_TRANSFER_MAP_DIRECT:
success = tex_map_direct(ilo, tex, xfer);
break;
case ILO_TRANSFER_MAP_STAGING_SYS:
success = tex_map_sys(ilo, tex, xfer);
break;
default:
assert(!"unknown mapping method");
success = false;
break;
}

xfer->method = ILO_TRANSFER_MAP_DIRECT;
return success;
}

static void
tex_unmap(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
struct ilo_texture *tex = ilo_texture(xfer->base.resource);

switch (xfer->method) {
case ILO_TRANSFER_MAP_DIRECT:
tex_unmap_direct(ilo, tex, xfer);
break;
case ILO_TRANSFER_MAP_STAGING_SYS:
tex_unmap_sys(ilo, tex, xfer);
break;
default:
assert(!"unknown mapping method");
break;
}
}

static bool
buf_map(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
struct ilo_buffer *buf = ilo_buffer(xfer->base.resource);
int err;

if (!transfer_choose_method(ilo, xfer))
return false;

assert(xfer->method == ILO_TRANSFER_MAP_DIRECT);

/* unsynchronized map does not stall */
if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
return true;
err = buf->bo->map_unsynchronized(buf->bo);
else if (ilo->dev->has_llc || (xfer->base.usage & PIPE_TRANSFER_READ))
err = buf->bo->map(buf->bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
else
err = buf->bo->map_gtt(buf->bo);

will_be_busy = ilo->cp->bo->references(ilo->cp->bo, tex->bo);
if (!will_be_busy) {
/*
* XXX With hardware context support, the bo may be needed by GPU
* without being referenced by ilo->cp->bo. We have to flush
* unconditionally, and that is bad.
*/
if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);
if (err)
return false;

if (!intel_bo_is_busy(tex->bo))
return true;
}
assert(xfer->base.level == 0);
assert(xfer->base.box.y == 0);
assert(xfer->base.box.z == 0);
assert(xfer->base.box.height == 1);
assert(xfer->base.box.depth == 1);

/* bo is busy and mapping it will stall */
will_stall = true;
xfer->base.stride = 0;
xfer->base.layer_stride = 0;

if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
/* nothing we can do */
}
else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* discard old bo and allocate a new one for mapping */
if (ilo_texture_alloc_bo(tex))
will_stall = false;
}
else if (xfer->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
/*
* We could allocate and return a system buffer here. When a region of
* the buffer is explicitly flushed, we pwrite() the region to a
* temporary bo and emit pipelined copy blit.
*
* For now, do nothing.
*/
}
else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_RANGE) {
/*
* We could allocate a temporary bo for mapping, and emit pipelined copy
* blit upon unmapping.
*
* For now, do nothing.
*/
}

if (will_stall) {
if (xfer->base.usage & PIPE_TRANSFER_DONTBLOCK)
return false;

/* flush to make bo busy (so that map() stalls as it should be) */
if (will_be_busy)
ilo_cp_flush(ilo->cp);
}
xfer->ptr = buf->bo->get_virtual(buf->bo);
xfer->ptr += xfer->base.box.x;

return true;
}

static void
buf_unmap(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
struct ilo_buffer *buf = ilo_buffer(xfer->base.resource);

buf->bo->unmap(buf->bo);
}

static void
buf_pwrite(struct ilo_context *ilo, struct ilo_buffer *buf,
unsigned usage, int offset, int size, const void *data)
{
bool will_be_busy;

/*
* XXX With hardware context support, the bo may be needed by GPU without
* being referenced by ilo->cp->bo. We have to flush unconditionally, and
* that is bad.
*/
if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);

will_be_busy = ilo->cp->bo->references(ilo->cp->bo, buf->bo);

/* see if we can avoid stalling */
if (will_be_busy || intel_bo_is_busy(buf->bo)) {
bool will_stall = true;

if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* old data not needed so discard the old bo to avoid stalling */
if (ilo_buffer_alloc_bo(buf))
will_stall = false;
}
else {
/*
* We could allocate a temporary bo to hold the data and emit
* pipelined copy blit to move them to buf->bo. But for now, do
* nothing.
*/
}

/* flush to make bo busy (so that pwrite() stalls as it should be) */
if (will_stall && will_be_busy)
ilo_cp_flush(ilo->cp);
}

buf->bo->pwrite(buf->bo, offset, size, data);
}

static void
ilo_transfer_flush_region(struct pipe_context *pipe,
struct pipe_transfer *transfer,
@@ -386,20 +456,12 @@ ilo_transfer_unmap(struct pipe_context *pipe,
struct pipe_transfer *transfer)
{
struct ilo_context *ilo = ilo_context(pipe);
struct ilo_texture *tex = ilo_texture(transfer->resource);
struct ilo_transfer *xfer = ilo_transfer(transfer);

switch (xfer->method) {
case ILO_TRANSFER_MAP_DIRECT:
transfer_unmap_direct(ilo, tex, xfer);
break;
case ILO_TRANSFER_MAP_STAGING_SYS:
transfer_unmap_sys(ilo, tex, xfer);
break;
default:
assert(!"unknown mapping method");
break;
}
if (xfer->base.resource->target == PIPE_BUFFER)
buf_unmap(ilo, xfer);
else
tex_unmap(ilo, xfer);

pipe_resource_reference(&xfer->base.resource, NULL);
FREE(xfer);
@@ -414,9 +476,8 @@ ilo_transfer_map(struct pipe_context *pipe,
struct pipe_transfer **transfer)
{
struct ilo_context *ilo = ilo_context(pipe);
struct ilo_texture *tex = ilo_texture(res);
struct ilo_transfer *xfer;
int ok;
bool success;

xfer = MALLOC_STRUCT(ilo_transfer);
if (!xfer) {
@@ -425,33 +486,20 @@ ilo_transfer_map(struct pipe_context *pipe,
}

xfer->base.resource = NULL;
pipe_resource_reference(&xfer->base.resource, &tex->base);
pipe_resource_reference(&xfer->base.resource, res);
xfer->base.level = level;
xfer->base.usage = usage;
xfer->base.box = *box;

ok = transfer_map_choose_method(ilo, tex, xfer);
if (ok) {
switch (xfer->method) {
case ILO_TRANSFER_MAP_DIRECT:
ok = transfer_map_direct(ilo, tex, xfer);
break;
case ILO_TRANSFER_MAP_STAGING_SYS:
ok = transfer_map_sys(ilo, tex, xfer);
break;
default:
assert(!"unknown mapping method");
ok = false;
break;
}
}
if (res->target == PIPE_BUFFER)
success = buf_map(ilo, xfer);
else
success = tex_map(ilo, xfer);

if (!ok) {
if (!success) {
pipe_resource_reference(&xfer->base.resource, NULL);
FREE(xfer);

*transfer = NULL;

return NULL;
}
@@ -460,6 +508,34 @@ ilo_transfer_map(struct pipe_context *pipe,
return xfer->ptr;
}

static void
ilo_transfer_inline_write(struct pipe_context *pipe,
struct pipe_resource *res,
unsigned level,
unsigned usage,
const struct pipe_box *box,
const void *data,
unsigned stride,
unsigned layer_stride)
{
if (likely(res->target == PIPE_BUFFER) &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* they should specify just an offset and a size */
assert(level == 0);
assert(box->y == 0);
assert(box->z == 0);
assert(box->height == 1);
assert(box->depth == 1);

buf_pwrite(ilo_context(pipe), ilo_buffer(res),
usage, box->x, box->width, data);
}
else {
u_default_transfer_inline_write(pipe, res,
level, usage, box, data, stride, layer_stride);
}
}

/**
* Initialize transfer-related functions.
*/