mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2025-12-26 19:20:08 +01:00
r600g: merge r600_bo with r600_resource
I have moved 'last_flush' and 'binding' from r600_bo to winsys/radeon. The other members are now part of r600_resource. Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
ba89086e79
commit
6101b6d442
18 changed files with 243 additions and 382 deletions
|
|
@ -975,11 +975,9 @@ static struct pipe_sampler_view *evergreen_create_sampler_view(struct pipe_conte
|
|||
struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
|
||||
struct r600_pipe_resource_state *rstate;
|
||||
struct r600_resource_texture *tmp = (struct r600_resource_texture*)texture;
|
||||
struct r600_resource *rbuffer;
|
||||
unsigned format, endian;
|
||||
uint32_t word4 = 0, yuv_format = 0, pitch = 0;
|
||||
unsigned char swizzle[4], array_mode = 0, tile_type = 0;
|
||||
struct r600_bo *bo[2];
|
||||
unsigned height, depth;
|
||||
|
||||
if (view == NULL)
|
||||
|
|
@ -1018,10 +1016,6 @@ static struct pipe_sampler_view *evergreen_create_sampler_view(struct pipe_conte
|
|||
word4 |= S_030010_NUM_FORMAT_ALL(V_030010_SQ_NUM_FORMAT_INT);
|
||||
}
|
||||
|
||||
rbuffer = &tmp->resource;
|
||||
bo[0] = rbuffer->bo;
|
||||
bo[1] = rbuffer->bo;
|
||||
|
||||
height = texture->height0;
|
||||
depth = texture->depth0;
|
||||
|
||||
|
|
@ -1037,8 +1031,8 @@ static struct pipe_sampler_view *evergreen_create_sampler_view(struct pipe_conte
|
|||
depth = texture->array_size;
|
||||
}
|
||||
|
||||
rstate->bo[0] = bo[0];
|
||||
rstate->bo[1] = bo[1];
|
||||
rstate->bo[0] = &tmp->resource;
|
||||
rstate->bo[1] = &tmp->resource;
|
||||
rstate->bo_usage[0] = RADEON_USAGE_READ;
|
||||
rstate->bo_usage[1] = RADEON_USAGE_READ;
|
||||
|
||||
|
|
@ -1286,7 +1280,6 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
const struct pipe_framebuffer_state *state, int cb)
|
||||
{
|
||||
struct r600_resource_texture *rtex;
|
||||
struct r600_resource *rbuffer;
|
||||
struct r600_surface *surf;
|
||||
unsigned level = state->cbufs[cb]->u.tex.level;
|
||||
unsigned pitch, slice;
|
||||
|
|
@ -1295,7 +1288,6 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
unsigned offset;
|
||||
unsigned tile_type;
|
||||
const struct util_format_description *desc;
|
||||
struct r600_bo *bo[3];
|
||||
int i;
|
||||
unsigned blend_clamp = 0, blend_bypass = 0;
|
||||
|
||||
|
|
@ -1310,11 +1302,6 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
rtex = rtex->flushed_depth_texture;
|
||||
}
|
||||
|
||||
rbuffer = &rtex->resource;
|
||||
bo[0] = rbuffer->bo;
|
||||
bo[1] = rbuffer->bo;
|
||||
bo[2] = rbuffer->bo;
|
||||
|
||||
/* XXX quite sure for dx10+ hw don't need any offset hacks */
|
||||
offset = r600_texture_get_offset((struct r600_resource_texture *)state->cbufs[cb]->texture,
|
||||
level, state->cbufs[cb]->u.tex.first_layer);
|
||||
|
|
@ -1343,7 +1330,7 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
|
||||
format = r600_translate_colorformat(surf->base.format);
|
||||
swap = r600_translate_colorswap(surf->base.format);
|
||||
if (rbuffer->b.b.b.usage == PIPE_USAGE_STAGING) {
|
||||
if (rtex->resource.b.b.b.usage == PIPE_USAGE_STAGING) {
|
||||
endian = ENDIAN_NONE;
|
||||
} else {
|
||||
endian = r600_colorformat_endian_swap(format);
|
||||
|
|
@ -1404,13 +1391,13 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
/* FIXME handle enabling of CB beyond BASE8 which has different offset */
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028C60_CB_COLOR0_BASE + cb * 0x3C,
|
||||
offset >> 8, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
|
||||
offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028C78_CB_COLOR0_DIM + cb * 0x3C,
|
||||
0x0, 0xFFFFFFFF, NULL, 0);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028C70_CB_COLOR0_INFO + cb * 0x3C,
|
||||
color_info, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
|
||||
color_info, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028C64_CB_COLOR0_PITCH + cb * 0x3C,
|
||||
S_028C64_PITCH_TILE_MAX(pitch),
|
||||
|
|
@ -1425,7 +1412,7 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
r600_pipe_state_add_reg(rstate,
|
||||
R_028C74_CB_COLOR0_ATTRIB + cb * 0x3C,
|
||||
S_028C74_NON_DISP_TILING_ORDER(tile_type),
|
||||
0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
|
||||
0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
}
|
||||
|
||||
static void evergreen_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate,
|
||||
|
|
@ -1451,9 +1438,9 @@ static void evergreen_db(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
format = r600_translate_dbformat(rtex->real_format);
|
||||
|
||||
r600_pipe_state_add_reg(rstate, R_028048_DB_Z_READ_BASE,
|
||||
offset >> 8, 0xFFFFFFFF, rtex->resource.bo, RADEON_USAGE_READWRITE);
|
||||
offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028050_DB_Z_WRITE_BASE,
|
||||
offset >> 8, 0xFFFFFFFF, rtex->resource.bo, RADEON_USAGE_READWRITE);
|
||||
offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028008_DB_DEPTH_VIEW, 0x00000000, 0xFFFFFFFF, NULL, 0);
|
||||
|
||||
if (rtex->stencil) {
|
||||
|
|
@ -1461,11 +1448,11 @@ static void evergreen_db(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
r600_texture_get_offset(rtex->stencil, level, first_layer);
|
||||
|
||||
r600_pipe_state_add_reg(rstate, R_02804C_DB_STENCIL_READ_BASE,
|
||||
stencil_offset >> 8, 0xFFFFFFFF, rtex->stencil->resource.bo, RADEON_USAGE_READWRITE);
|
||||
stencil_offset >> 8, 0xFFFFFFFF, &rtex->stencil->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028054_DB_STENCIL_WRITE_BASE,
|
||||
stencil_offset >> 8, 0xFFFFFFFF, rtex->stencil->resource.bo, RADEON_USAGE_READWRITE);
|
||||
stencil_offset >> 8, 0xFFFFFFFF, &rtex->stencil->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028044_DB_STENCIL_INFO,
|
||||
1, 0xFFFFFFFF, rtex->stencil->resource.bo, RADEON_USAGE_READWRITE);
|
||||
1, 0xFFFFFFFF, &rtex->stencil->resource, RADEON_USAGE_READWRITE);
|
||||
} else {
|
||||
r600_pipe_state_add_reg(rstate, R_028044_DB_STENCIL_INFO,
|
||||
0, 0xFFFFFFFF, NULL, RADEON_USAGE_READWRITE);
|
||||
|
|
@ -1473,7 +1460,7 @@ static void evergreen_db(struct r600_pipe_context *rctx, struct r600_pipe_state
|
|||
|
||||
r600_pipe_state_add_reg(rstate, R_028040_DB_Z_INFO,
|
||||
S_028040_ARRAY_MODE(rtex->array_mode[level]) | S_028040_FORMAT(format),
|
||||
0xFFFFFFFF, rtex->resource.bo, RADEON_USAGE_READWRITE);
|
||||
0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028058_DB_DEPTH_SIZE,
|
||||
S_028058_PITCH_TILE_MAX(pitch),
|
||||
0xFFFFFFFF, NULL, 0);
|
||||
|
|
@ -2457,10 +2444,10 @@ void evergreen_pipe_mod_buffer_resource(struct r600_pipe_resource_state *rstate,
|
|||
unsigned offset, unsigned stride,
|
||||
enum radeon_bo_usage usage)
|
||||
{
|
||||
rstate->bo[0] = rbuffer->bo;
|
||||
rstate->bo[0] = rbuffer;
|
||||
rstate->bo_usage[0] = usage;
|
||||
rstate->val[0] = offset;
|
||||
rstate->val[1] = rbuffer->bo_size - offset - 1;
|
||||
rstate->val[1] = rbuffer->buf->size - offset - 1;
|
||||
rstate->val[2] = S_030008_ENDIAN_SWAP(r600_endian_swap(32)) |
|
||||
S_030008_STRIDE(stride);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@
|
|||
|
||||
#include "../../winsys/radeon/drm/radeon_winsys.h"
|
||||
#include "util/u_double_list.h"
|
||||
#include "util/u_vbuf_mgr.h"
|
||||
|
||||
#define R600_ERR(fmt, args...) \
|
||||
fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
|
||||
|
|
@ -85,33 +86,16 @@ struct r600_tiling_info {
|
|||
enum radeon_family r600_get_family(struct radeon *rw);
|
||||
enum chip_class r600_get_family_class(struct radeon *radeon);
|
||||
|
||||
/* r600_bo.c */
|
||||
struct r600_bo;
|
||||
struct radeon_winsys_cs;
|
||||
struct r600_resource {
|
||||
struct u_vbuf_resource b;
|
||||
|
||||
struct r600_bo *r600_bo(struct radeon *radeon,
|
||||
unsigned size, unsigned alignment,
|
||||
unsigned binding, unsigned usage);
|
||||
struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
|
||||
unsigned *stride, unsigned *array_mode);
|
||||
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, struct radeon_winsys_cs *cs, unsigned usage);
|
||||
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
|
||||
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
|
||||
unsigned stride, struct winsys_handle *whandle);
|
||||
|
||||
void r600_bo_destroy(struct r600_bo *bo);
|
||||
|
||||
/* this relies on the pipe_reference being the first member of r600_bo */
|
||||
static INLINE void r600_bo_reference(struct r600_bo **dst, struct r600_bo *src)
|
||||
{
|
||||
struct r600_bo *old = *dst;
|
||||
|
||||
if (pipe_reference((struct pipe_reference *)(*dst), (struct pipe_reference *)src)) {
|
||||
r600_bo_destroy(old);
|
||||
}
|
||||
*dst = src;
|
||||
}
|
||||
/* Winsys objects. */
|
||||
struct pb_buffer *buf;
|
||||
struct radeon_winsys_cs_handle *cs_buf;
|
||||
|
||||
/* Resource state. */
|
||||
unsigned domains;
|
||||
};
|
||||
|
||||
/* R600/R700 STATES */
|
||||
#define R600_GROUP_MAX 16
|
||||
|
|
@ -133,7 +117,7 @@ struct r600_pipe_reg {
|
|||
u32 value;
|
||||
u32 mask;
|
||||
struct r600_block *block;
|
||||
struct r600_bo *bo;
|
||||
struct r600_resource *bo;
|
||||
enum radeon_bo_usage bo_usage;
|
||||
u32 id;
|
||||
};
|
||||
|
|
@ -147,7 +131,7 @@ struct r600_pipe_state {
|
|||
struct r600_pipe_resource_state {
|
||||
unsigned id;
|
||||
u32 val[8];
|
||||
struct r600_bo *bo[2];
|
||||
struct r600_resource *bo[2];
|
||||
enum radeon_bo_usage bo_usage[2];
|
||||
};
|
||||
|
||||
|
|
@ -158,7 +142,7 @@ struct r600_pipe_resource_state {
|
|||
#define R600_BLOCK_STATUS_RESOURCE_VERTEX (1 << 3)
|
||||
|
||||
struct r600_block_reloc {
|
||||
struct r600_bo *bo;
|
||||
struct r600_resource *bo;
|
||||
enum radeon_bo_usage bo_usage;
|
||||
unsigned flush_flags;
|
||||
unsigned flush_mask;
|
||||
|
|
@ -206,7 +190,7 @@ struct r600_query {
|
|||
/* The buffer where query results are stored. It's used as a ring,
|
||||
* data blocks for current query are stored sequentially from
|
||||
* results_start to results_end, with wrapping on the buffer end */
|
||||
struct r600_bo *buffer;
|
||||
struct r600_resource *buffer;
|
||||
unsigned buffer_size;
|
||||
/* linked list of queries */
|
||||
struct list_head list;
|
||||
|
|
@ -223,6 +207,7 @@ struct r600_query {
|
|||
|
||||
struct r600_context {
|
||||
struct radeon *radeon;
|
||||
struct r600_screen *screen;
|
||||
struct radeon_winsys_cs *cs;
|
||||
|
||||
struct r600_range *range;
|
||||
|
|
@ -237,7 +222,7 @@ struct r600_context {
|
|||
unsigned init_dwords;
|
||||
|
||||
unsigned creloc;
|
||||
struct r600_bo **bo;
|
||||
struct r600_resource **bo;
|
||||
|
||||
u32 *pm4;
|
||||
unsigned pm4_cdwords;
|
||||
|
|
@ -262,11 +247,11 @@ struct r600_draw {
|
|||
u32 vgt_index_type;
|
||||
u32 vgt_draw_initiator;
|
||||
u32 indices_bo_offset;
|
||||
struct r600_bo *indices;
|
||||
struct r600_resource *indices;
|
||||
};
|
||||
|
||||
void r600_get_backend_mask(struct r600_context *ctx);
|
||||
int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
|
||||
int r600_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon);
|
||||
void r600_context_fini(struct r600_context *ctx);
|
||||
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
|
||||
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
|
||||
|
|
@ -288,12 +273,12 @@ void r600_context_queries_suspend(struct r600_context *ctx);
|
|||
void r600_context_queries_resume(struct r600_context *ctx, boolean flushed);
|
||||
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
|
||||
int flag_wait);
|
||||
void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence,
|
||||
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
|
||||
unsigned offset, unsigned value);
|
||||
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
|
||||
void r600_context_flush_dest_caches(struct r600_context *ctx);
|
||||
|
||||
int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
|
||||
int evergreen_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon);
|
||||
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
|
||||
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
|
||||
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
|
||||
|
|
@ -309,12 +294,12 @@ void _r600_pipe_state_add_reg(struct r600_context *ctx,
|
|||
struct r600_pipe_state *state,
|
||||
u32 offset, u32 value, u32 mask,
|
||||
u32 range_id, u32 block_id,
|
||||
struct r600_bo *bo,
|
||||
struct r600_resource *bo,
|
||||
enum radeon_bo_usage usage);
|
||||
|
||||
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
|
||||
u32 offset, u32 value, u32 mask,
|
||||
struct r600_bo *bo,
|
||||
struct r600_resource *bo,
|
||||
enum radeon_bo_usage usage);
|
||||
|
||||
#define r600_pipe_state_add_reg(state, offset, value, mask, bo, usage) _r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo, usage)
|
||||
|
|
@ -327,7 +312,7 @@ static inline void r600_pipe_state_mod_reg(struct r600_pipe_state *state,
|
|||
}
|
||||
|
||||
static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state *state,
|
||||
u32 value, struct r600_bo *bo,
|
||||
u32 value, struct r600_resource *bo,
|
||||
enum radeon_bo_usage usage)
|
||||
{
|
||||
state->regs[state->nregs].value = value;
|
||||
|
|
|
|||
|
|
@ -2262,17 +2262,19 @@ int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, stru
|
|||
|
||||
ve->fs_size = bc.ndw*4;
|
||||
|
||||
/* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
|
||||
ve->fetch_shader = r600_bo(rctx->radeon, ve->fs_size, 256, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE);
|
||||
ve->fetch_shader = (struct r600_resource*)
|
||||
pipe_buffer_create(rctx->context.screen,
|
||||
PIPE_BIND_CUSTOM,
|
||||
PIPE_USAGE_IMMUTABLE, ve->fs_size);
|
||||
if (ve->fetch_shader == NULL) {
|
||||
r600_bytecode_clear(&bc);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
|
||||
bytecode = rctx->ws->buffer_map(ve->fetch_shader->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
|
||||
if (bytecode == NULL) {
|
||||
r600_bytecode_clear(&bc);
|
||||
r600_bo_reference(&ve->fetch_shader, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&ve->fetch_shader, NULL);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
|
@ -2284,7 +2286,7 @@ int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, stru
|
|||
memcpy(bytecode, bc.bytecode, ve->fs_size);
|
||||
}
|
||||
|
||||
r600_bo_unmap(rctx->radeon, ve->fetch_shader);
|
||||
rctx->ws->buffer_unmap(ve->fetch_shader->buf);
|
||||
r600_bytecode_clear(&bc);
|
||||
|
||||
if (rctx->chip_class >= EVERGREEN)
|
||||
|
|
|
|||
|
|
@ -42,10 +42,7 @@ static void r600_buffer_destroy(struct pipe_screen *screen,
|
|||
struct r600_screen *rscreen = (struct r600_screen*)screen;
|
||||
struct r600_resource *rbuffer = r600_resource(buf);
|
||||
|
||||
if (rbuffer->bo) {
|
||||
r600_bo_reference(&rbuffer->bo, NULL);
|
||||
}
|
||||
rbuffer->bo = NULL;
|
||||
pb_reference(&rbuffer->buf, NULL);
|
||||
util_slab_free(&rscreen->pool_buffers, rbuffer);
|
||||
}
|
||||
|
||||
|
|
@ -82,7 +79,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
|
|||
if (rbuffer->b.user_ptr)
|
||||
return (uint8_t*)rbuffer->b.user_ptr + transfer->box.x;
|
||||
|
||||
data = r600_bo_map(rctx->screen->radeon, rbuffer->bo, rctx->ctx.cs, transfer->usage);
|
||||
data = rctx->ws->buffer_map(rbuffer->buf, rctx->ctx.cs, transfer->usage);
|
||||
if (!data)
|
||||
return NULL;
|
||||
|
||||
|
|
@ -98,8 +95,7 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
|
|||
if (rbuffer->b.user_ptr)
|
||||
return;
|
||||
|
||||
if (rbuffer->bo)
|
||||
r600_bo_unmap(rctx->screen->radeon, rbuffer->bo);
|
||||
rctx->ws->buffer_unmap(rbuffer->buf);
|
||||
}
|
||||
|
||||
static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
|
||||
|
|
@ -125,19 +121,17 @@ static void r600_buffer_transfer_inline_write(struct pipe_context *pipe,
|
|||
unsigned layer_stride)
|
||||
{
|
||||
struct r600_pipe_context *rctx = (struct r600_pipe_context*)pipe;
|
||||
struct radeon *radeon = rctx->screen->radeon;
|
||||
struct r600_resource *rbuffer = r600_resource(resource);
|
||||
uint8_t *map = NULL;
|
||||
|
||||
assert(rbuffer->b.user_ptr == NULL);
|
||||
|
||||
map = r600_bo_map(radeon, rbuffer->bo, rctx->ctx.cs,
|
||||
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | usage);
|
||||
map = rctx->ws->buffer_map(rbuffer->buf, rctx->ctx.cs,
|
||||
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | usage);
|
||||
|
||||
memcpy(map + box->x, data, box->width);
|
||||
|
||||
if (rbuffer->bo)
|
||||
r600_bo_unmap(radeon, rbuffer->bo);
|
||||
rctx->ws->buffer_unmap(rbuffer->buf);
|
||||
}
|
||||
|
||||
static const struct u_resource_vtbl r600_buffer_vtbl =
|
||||
|
|
@ -152,12 +146,53 @@ static const struct u_resource_vtbl r600_buffer_vtbl =
|
|||
r600_buffer_transfer_inline_write /* transfer_inline_write */
|
||||
};
|
||||
|
||||
bool r600_init_resource(struct r600_screen *rscreen,
|
||||
struct r600_resource *res,
|
||||
unsigned size, unsigned alignment,
|
||||
unsigned bind, unsigned usage)
|
||||
{
|
||||
uint32_t initial_domain, domains;
|
||||
|
||||
/* Staging resources particpate in transfers and blits only
|
||||
* and are used for uploads and downloads from regular
|
||||
* resources. We generate them internally for some transfers.
|
||||
*/
|
||||
if (usage == PIPE_USAGE_STAGING) {
|
||||
domains = RADEON_DOMAIN_GTT;
|
||||
initial_domain = RADEON_DOMAIN_GTT;
|
||||
} else {
|
||||
domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
|
||||
|
||||
switch(usage) {
|
||||
case PIPE_USAGE_DYNAMIC:
|
||||
case PIPE_USAGE_STREAM:
|
||||
case PIPE_USAGE_STAGING:
|
||||
initial_domain = RADEON_DOMAIN_GTT;
|
||||
break;
|
||||
case PIPE_USAGE_DEFAULT:
|
||||
case PIPE_USAGE_STATIC:
|
||||
case PIPE_USAGE_IMMUTABLE:
|
||||
default:
|
||||
initial_domain = RADEON_DOMAIN_VRAM;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, bind, initial_domain);
|
||||
if (!res->buf) {
|
||||
return false;
|
||||
}
|
||||
|
||||
res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
|
||||
res->domains = domains;
|
||||
return true;
|
||||
}
|
||||
|
||||
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
|
||||
const struct pipe_resource *templ)
|
||||
{
|
||||
struct r600_screen *rscreen = (struct r600_screen*)screen;
|
||||
struct r600_resource *rbuffer;
|
||||
struct r600_bo *bo;
|
||||
/* XXX We probably want a different alignment for buffers and textures. */
|
||||
unsigned alignment = 4096;
|
||||
|
||||
|
|
@ -168,19 +203,11 @@ struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
|
|||
rbuffer->b.b.b.screen = screen;
|
||||
rbuffer->b.b.vtbl = &r600_buffer_vtbl;
|
||||
rbuffer->b.user_ptr = NULL;
|
||||
rbuffer->size = rbuffer->b.b.b.width0;
|
||||
rbuffer->bo_size = rbuffer->size;
|
||||
|
||||
bo = r600_bo(rscreen->radeon,
|
||||
rbuffer->b.b.b.width0,
|
||||
alignment, rbuffer->b.b.b.bind,
|
||||
rbuffer->b.b.b.usage);
|
||||
|
||||
if (bo == NULL) {
|
||||
if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
|
||||
FREE(rbuffer);
|
||||
return NULL;
|
||||
}
|
||||
rbuffer->bo = bo;
|
||||
return &rbuffer->b.b.b;
|
||||
}
|
||||
|
||||
|
|
@ -206,8 +233,7 @@ struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
|
|||
rbuffer->b.b.b.array_size = 1;
|
||||
rbuffer->b.b.b.flags = 0;
|
||||
rbuffer->b.user_ptr = ptr;
|
||||
rbuffer->bo = NULL;
|
||||
rbuffer->bo_size = 0;
|
||||
rbuffer->buf = NULL;
|
||||
return &rbuffer->b.b.b;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -53,18 +53,19 @@
|
|||
*/
|
||||
static struct r600_fence *r600_create_fence(struct r600_pipe_context *ctx)
|
||||
{
|
||||
struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
|
||||
struct r600_fence *fence = NULL;
|
||||
|
||||
if (!ctx->fences.bo) {
|
||||
/* Create the shared buffer object */
|
||||
ctx->fences.bo = r600_bo(ctx->radeon, 4096, 0, 0, 0);
|
||||
ctx->fences.bo = (struct r600_resource*)
|
||||
pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
|
||||
PIPE_USAGE_STAGING, 4096);
|
||||
if (!ctx->fences.bo) {
|
||||
R600_ERR("r600: failed to create bo for fence objects\n");
|
||||
return NULL;
|
||||
}
|
||||
ctx->fences.data = r600_bo_map(ctx->radeon, ctx->fences.bo, rctx->ctx.cs,
|
||||
PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_WRITE);
|
||||
ctx->fences.data = ctx->ws->buffer_map(ctx->fences.bo->buf, ctx->ctx.cs,
|
||||
PIPE_TRANSFER_WRITE);
|
||||
}
|
||||
|
||||
if (!LIST_IS_EMPTY(&ctx->fences.pool)) {
|
||||
|
|
@ -184,8 +185,8 @@ static void r600_destroy_context(struct pipe_context *context)
|
|||
FREE(entry);
|
||||
}
|
||||
|
||||
r600_bo_unmap(rctx->radeon, rctx->fences.bo);
|
||||
r600_bo_reference(&rctx->fences.bo, NULL);
|
||||
rctx->ws->buffer_unmap(rctx->fences.bo->buf);
|
||||
pipe_resource_reference((struct pipe_resource**)&rctx->fences.bo, NULL);
|
||||
}
|
||||
|
||||
r600_update_num_contexts(rctx->screen, -1);
|
||||
|
|
@ -211,6 +212,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
|
|||
|
||||
/* Easy accessing of screen/winsys. */
|
||||
rctx->screen = rscreen;
|
||||
rctx->ws = rscreen->ws;
|
||||
rctx->radeon = rscreen->radeon;
|
||||
rctx->family = r600_get_family(rctx->radeon);
|
||||
rctx->chip_class = r600_get_family_class(rctx->radeon);
|
||||
|
|
@ -234,7 +236,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
|
|||
case R600:
|
||||
case R700:
|
||||
r600_init_state_functions(rctx);
|
||||
if (r600_context_init(&rctx->ctx, rctx->radeon)) {
|
||||
if (r600_context_init(&rctx->ctx, rctx->screen, rctx->radeon)) {
|
||||
r600_destroy_context(&rctx->context);
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -244,7 +246,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
|
|||
case EVERGREEN:
|
||||
case CAYMAN:
|
||||
evergreen_init_state_functions(rctx);
|
||||
if (evergreen_context_init(&rctx->ctx, rctx->radeon)) {
|
||||
if (evergreen_context_init(&rctx->ctx, rctx->screen, rctx->radeon)) {
|
||||
r600_destroy_context(&rctx->context);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -114,8 +114,8 @@ struct r600_vertex_element
|
|||
{
|
||||
unsigned count;
|
||||
struct pipe_vertex_element elements[PIPE_MAX_ATTRIBS];
|
||||
struct u_vbuf_elements *vmgr_elements;
|
||||
struct r600_bo *fetch_shader;
|
||||
struct u_vbuf_elements *vmgr_elements;
|
||||
struct r600_resource *fetch_shader;
|
||||
unsigned fs_size;
|
||||
struct r600_pipe_state rstate;
|
||||
/* if offset is to big for fetch instructio we need to alterate
|
||||
|
|
@ -128,8 +128,8 @@ struct r600_vertex_element
|
|||
struct r600_pipe_shader {
|
||||
struct r600_shader shader;
|
||||
struct r600_pipe_state rstate;
|
||||
struct r600_bo *bo;
|
||||
struct r600_bo *bo_fetch;
|
||||
struct r600_resource *bo;
|
||||
struct r600_resource *bo_fetch;
|
||||
struct r600_vertex_element vertex_elements;
|
||||
struct tgsi_token *tokens;
|
||||
};
|
||||
|
|
@ -166,7 +166,7 @@ struct r600_fence_block {
|
|||
};
|
||||
|
||||
struct r600_pipe_fences {
|
||||
struct r600_bo *bo;
|
||||
struct r600_resource *bo;
|
||||
unsigned *data;
|
||||
unsigned next_index;
|
||||
/* linked list of preallocated blocks */
|
||||
|
|
@ -185,6 +185,7 @@ struct r600_pipe_context {
|
|||
enum chip_class chip_class;
|
||||
void *custom_dsa_flush;
|
||||
struct r600_screen *screen;
|
||||
struct radeon_winsys *ws;
|
||||
struct radeon *radeon;
|
||||
struct r600_pipe_state *states[R600_PIPE_NSTATES];
|
||||
struct r600_context ctx;
|
||||
|
|
@ -269,6 +270,10 @@ void r600_blit_push_depth(struct pipe_context *ctx, struct r600_resource_texture
|
|||
void r600_flush_depth_textures(struct r600_pipe_context *rctx);
|
||||
|
||||
/* r600_buffer.c */
|
||||
bool r600_init_resource(struct r600_screen *rscreen,
|
||||
struct r600_resource *res,
|
||||
unsigned size, unsigned alignment,
|
||||
unsigned bind, unsigned usage);
|
||||
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
|
||||
const struct pipe_resource *templ);
|
||||
struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
|
||||
|
|
|
|||
|
|
@ -39,17 +39,6 @@ struct r600_transfer {
|
|||
struct pipe_resource *staging_texture;
|
||||
};
|
||||
|
||||
/* This gets further specialized into either buffer or texture
|
||||
* structures. Use the vtbl struct to choose between the two
|
||||
* underlying implementations.
|
||||
*/
|
||||
struct r600_resource {
|
||||
struct u_vbuf_resource b;
|
||||
struct r600_bo *bo;
|
||||
u32 size;
|
||||
unsigned bo_size;
|
||||
};
|
||||
|
||||
struct r600_resource_texture {
|
||||
struct r600_resource resource;
|
||||
|
||||
|
|
|
|||
|
|
@ -81,12 +81,12 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s
|
|||
|
||||
/* copy new shader */
|
||||
if (shader->bo == NULL) {
|
||||
/* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
|
||||
shader->bo = r600_bo(rctx->radeon, rshader->bc.ndw * 4, 4096, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE);
|
||||
shader->bo = (struct r600_resource*)
|
||||
pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, rshader->bc.ndw * 4);
|
||||
if (shader->bo == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
ptr = (uint32_t*)r600_bo_map(rctx->radeon, shader->bo, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
|
||||
ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
|
||||
if (R600_BIG_ENDIAN) {
|
||||
for (i = 0; i < rshader->bc.ndw; ++i) {
|
||||
ptr[i] = bswap_32(rshader->bc.bytecode[i]);
|
||||
|
|
@ -94,7 +94,7 @@ static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *s
|
|||
} else {
|
||||
memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * sizeof(*ptr));
|
||||
}
|
||||
r600_bo_unmap(rctx->radeon, shader->bo);
|
||||
rctx->ws->buffer_unmap(shader->bo->buf);
|
||||
}
|
||||
/* build state */
|
||||
switch (rshader->processor_type) {
|
||||
|
|
@ -154,7 +154,7 @@ int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *s
|
|||
|
||||
void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
|
||||
{
|
||||
r600_bo_reference(&shader->bo, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
|
||||
r600_bytecode_clear(&shader->shader.bc);
|
||||
|
||||
memset(&shader->shader,0,sizeof(struct r600_shader));
|
||||
|
|
|
|||
|
|
@ -1013,11 +1013,9 @@ static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *c
|
|||
struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
|
||||
struct r600_pipe_resource_state *rstate;
|
||||
struct r600_resource_texture *tmp = (struct r600_resource_texture*)texture;
|
||||
struct r600_resource *rbuffer;
|
||||
unsigned format, endian;
|
||||
uint32_t word4 = 0, yuv_format = 0, pitch = 0;
|
||||
unsigned char swizzle[4], array_mode = 0, tile_type = 0;
|
||||
struct r600_bo *bo[2];
|
||||
unsigned width, height, depth, offset_level, last_level;
|
||||
|
||||
if (view == NULL)
|
||||
|
|
@ -1056,10 +1054,6 @@ static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *c
|
|||
word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
|
||||
}
|
||||
|
||||
rbuffer = &tmp->resource;
|
||||
bo[0] = rbuffer->bo;
|
||||
bo[1] = rbuffer->bo;
|
||||
|
||||
offset_level = state->u.tex.first_level;
|
||||
last_level = state->u.tex.last_level - offset_level;
|
||||
width = u_minify(texture->width0, offset_level);
|
||||
|
|
@ -1078,8 +1072,8 @@ static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *c
|
|||
depth = texture->array_size;
|
||||
}
|
||||
|
||||
rstate->bo[0] = bo[0];
|
||||
rstate->bo[1] = bo[1];
|
||||
rstate->bo[0] = &tmp->resource;
|
||||
rstate->bo[1] = &tmp->resource;
|
||||
rstate->bo_usage[0] = RADEON_USAGE_READ;
|
||||
rstate->bo_usage[1] = RADEON_USAGE_READ;
|
||||
|
||||
|
|
@ -1389,7 +1383,6 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
const struct pipe_framebuffer_state *state, int cb)
|
||||
{
|
||||
struct r600_resource_texture *rtex;
|
||||
struct r600_resource *rbuffer;
|
||||
struct r600_surface *surf;
|
||||
unsigned level = state->cbufs[cb]->u.tex.level;
|
||||
unsigned pitch, slice;
|
||||
|
|
@ -1397,7 +1390,6 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
unsigned format, swap, ntype, endian;
|
||||
unsigned offset;
|
||||
const struct util_format_description *desc;
|
||||
struct r600_bo *bo[3];
|
||||
int i;
|
||||
|
||||
surf = (struct r600_surface *)state->cbufs[cb];
|
||||
|
|
@ -1411,11 +1403,6 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
rtex = rtex->flushed_depth_texture;
|
||||
}
|
||||
|
||||
rbuffer = &rtex->resource;
|
||||
bo[0] = rbuffer->bo;
|
||||
bo[1] = rbuffer->bo;
|
||||
bo[2] = rbuffer->bo;
|
||||
|
||||
/* XXX quite sure for dx10+ hw don't need any offset hacks */
|
||||
offset = r600_texture_get_offset(rtex,
|
||||
level, state->cbufs[cb]->u.tex.first_layer);
|
||||
|
|
@ -1436,7 +1423,7 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
|
||||
format = r600_translate_colorformat(surf->base.format);
|
||||
swap = r600_translate_colorswap(surf->base.format);
|
||||
if(rbuffer->b.b.b.usage == PIPE_USAGE_STAGING) {
|
||||
if(rtex->resource.b.b.b.usage == PIPE_USAGE_STAGING) {
|
||||
endian = ENDIAN_NONE;
|
||||
} else {
|
||||
endian = r600_colorformat_endian_swap(format);
|
||||
|
|
@ -1486,10 +1473,10 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028040_CB_COLOR0_BASE + cb * 4,
|
||||
offset >> 8, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
|
||||
offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_0280A0_CB_COLOR0_INFO + cb * 4,
|
||||
color_info, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
|
||||
color_info, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028060_CB_COLOR0_SIZE + cb * 4,
|
||||
S_028060_PITCH_TILE_MAX(pitch) |
|
||||
|
|
@ -1500,10 +1487,10 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
0x00000000, 0xFFFFFFFF, NULL, 0);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_0280E0_CB_COLOR0_FRAG + cb * 4,
|
||||
0, 0xFFFFFFFF, bo[1], RADEON_USAGE_READWRITE);
|
||||
0, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_0280C0_CB_COLOR0_TILE + cb * 4,
|
||||
0, 0xFFFFFFFF, bo[2], RADEON_USAGE_READWRITE);
|
||||
0, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate,
|
||||
R_028100_CB_COLOR0_MASK + cb * 4,
|
||||
0x00000000, 0xFFFFFFFF, NULL, 0);
|
||||
|
|
@ -1513,7 +1500,6 @@ static void r600_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
const struct pipe_framebuffer_state *state)
|
||||
{
|
||||
struct r600_resource_texture *rtex;
|
||||
struct r600_resource *rbuffer;
|
||||
struct r600_surface *surf;
|
||||
unsigned level;
|
||||
unsigned pitch, slice, format;
|
||||
|
|
@ -1527,8 +1513,6 @@ static void r600_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
surf = (struct r600_surface *)state->zsbuf;
|
||||
rtex = (struct r600_resource_texture*)state->zsbuf->texture;
|
||||
|
||||
rbuffer = &rtex->resource;
|
||||
|
||||
/* XXX quite sure for dx10+ hw don't need any offset hacks */
|
||||
offset = r600_texture_get_offset((struct r600_resource_texture *)state->zsbuf->texture,
|
||||
level, state->zsbuf->u.tex.first_layer);
|
||||
|
|
@ -1537,14 +1521,14 @@ static void r600_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta
|
|||
format = r600_translate_dbformat(state->zsbuf->texture->format);
|
||||
|
||||
r600_pipe_state_add_reg(rstate, R_02800C_DB_DEPTH_BASE,
|
||||
offset >> 8, 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READWRITE);
|
||||
offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028000_DB_DEPTH_SIZE,
|
||||
S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice),
|
||||
0xFFFFFFFF, NULL, 0);
|
||||
r600_pipe_state_add_reg(rstate, R_028004_DB_DEPTH_VIEW, 0x00000000, 0xFFFFFFFF, NULL, 0);
|
||||
r600_pipe_state_add_reg(rstate, R_028010_DB_DEPTH_INFO,
|
||||
S_028010_ARRAY_MODE(rtex->array_mode[level]) | S_028010_FORMAT(format),
|
||||
0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READWRITE);
|
||||
0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
|
||||
r600_pipe_state_add_reg(rstate, R_028D34_DB_PREFETCH_LIMIT,
|
||||
(surf->aligned_height / 8) - 1, 0xFFFFFFFF, NULL, 0);
|
||||
}
|
||||
|
|
@ -2235,9 +2219,9 @@ void r600_pipe_mod_buffer_resource(struct r600_pipe_resource_state *rstate,
|
|||
enum radeon_bo_usage usage)
|
||||
{
|
||||
rstate->val[0] = offset;
|
||||
rstate->bo[0] = rbuffer->bo;
|
||||
rstate->bo[0] = rbuffer;
|
||||
rstate->bo_usage[0] = usage;
|
||||
rstate->val[1] = rbuffer->bo_size - offset - 1;
|
||||
rstate->val[1] = rbuffer->buf->size - offset - 1;
|
||||
rstate->val[2] = S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
|
||||
S_038008_STRIDE(stride);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -150,7 +150,7 @@ void r600_delete_state(struct pipe_context *ctx, void *state)
|
|||
rctx->states[rstate->id] = NULL;
|
||||
}
|
||||
for (int i = 0; i < rstate->nregs; i++) {
|
||||
r600_bo_reference(&rstate->regs[i].bo, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);
|
||||
}
|
||||
free(rstate);
|
||||
}
|
||||
|
|
@ -181,7 +181,7 @@ void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
|
|||
if (rctx->vertex_elements == state)
|
||||
rctx->vertex_elements = NULL;
|
||||
|
||||
r600_bo_reference(&v->fetch_shader, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
|
||||
u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
|
||||
FREE(state);
|
||||
}
|
||||
|
|
@ -428,7 +428,7 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
|
|||
0xFFFFFFFF, NULL, 0);
|
||||
r600_pipe_state_add_reg(&rctx->vs_const_buffer,
|
||||
R_028980_ALU_CONST_CACHE_VS_0,
|
||||
offset >> 8, 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READ);
|
||||
offset >> 8, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
|
||||
r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);
|
||||
|
||||
rstate = &rctx->vs_const_buffer_resource[index];
|
||||
|
|
@ -456,7 +456,7 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
|
|||
0xFFFFFFFF, NULL, 0);
|
||||
r600_pipe_state_add_reg(&rctx->ps_const_buffer,
|
||||
R_028940_ALU_CONST_CACHE_PS_0,
|
||||
offset >> 8, 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READ);
|
||||
offset >> 8, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
|
||||
r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);
|
||||
|
||||
rstate = &rctx->ps_const_buffer_resource[index];
|
||||
|
|
@ -663,7 +663,7 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
|
|||
rdraw.indices = NULL;
|
||||
if (draw.index_buffer) {
|
||||
rbuffer = (struct r600_resource*)draw.index_buffer;
|
||||
rdraw.indices = rbuffer->bo;
|
||||
rdraw.indices = rbuffer;
|
||||
rdraw.indices_bo_offset = draw.index_buffer_offset;
|
||||
}
|
||||
|
||||
|
|
@ -688,7 +688,7 @@ void _r600_pipe_state_add_reg(struct r600_context *ctx,
|
|||
struct r600_pipe_state *state,
|
||||
u32 offset, u32 value, u32 mask,
|
||||
u32 range_id, u32 block_id,
|
||||
struct r600_bo *bo,
|
||||
struct r600_resource *bo,
|
||||
enum radeon_bo_usage usage)
|
||||
{
|
||||
struct r600_range *range;
|
||||
|
|
@ -712,7 +712,7 @@ void _r600_pipe_state_add_reg(struct r600_context *ctx,
|
|||
|
||||
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
|
||||
u32 offset, u32 value, u32 mask,
|
||||
struct r600_bo *bo,
|
||||
struct r600_resource *bo,
|
||||
enum radeon_bo_usage usage)
|
||||
{
|
||||
if (bo) assert(usage);
|
||||
|
|
|
|||
|
|
@ -338,10 +338,10 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
|
|||
{
|
||||
struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
|
||||
struct r600_resource *resource = &rtex->resource;
|
||||
struct radeon *radeon = ((struct r600_screen*)screen)->radeon;
|
||||
struct r600_screen *rscreen = (struct r600_screen*)screen;
|
||||
|
||||
return r600_bo_get_winsys_handle(radeon, resource->bo,
|
||||
rtex->pitch_in_bytes[0], whandle);
|
||||
return rscreen->ws->buffer_get_handle(resource->buf,
|
||||
rtex->pitch_in_bytes[0], whandle);
|
||||
}
|
||||
|
||||
static void r600_texture_destroy(struct pipe_screen *screen,
|
||||
|
|
@ -353,9 +353,7 @@ static void r600_texture_destroy(struct pipe_screen *screen,
|
|||
if (rtex->flushed_depth_texture)
|
||||
pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
|
||||
|
||||
if (resource->bo) {
|
||||
r600_bo_reference(&resource->bo, NULL);
|
||||
}
|
||||
pb_reference(&resource->buf, NULL);
|
||||
FREE(rtex);
|
||||
}
|
||||
|
||||
|
|
@ -377,12 +375,12 @@ r600_texture_create_object(struct pipe_screen *screen,
|
|||
unsigned array_mode,
|
||||
unsigned pitch_in_bytes_override,
|
||||
unsigned max_buffer_size,
|
||||
struct r600_bo *bo,
|
||||
struct pb_buffer *buf,
|
||||
boolean alloc_bo)
|
||||
{
|
||||
struct r600_resource_texture *rtex;
|
||||
struct r600_resource *resource;
|
||||
struct radeon *radeon = ((struct r600_screen*)screen)->radeon;
|
||||
struct r600_screen *rscreen = (struct r600_screen*)screen;
|
||||
|
||||
rtex = CALLOC_STRUCT(r600_resource_texture);
|
||||
if (rtex == NULL)
|
||||
|
|
@ -393,7 +391,6 @@ r600_texture_create_object(struct pipe_screen *screen,
|
|||
resource->b.b.vtbl = &r600_texture_vtbl;
|
||||
pipe_reference_init(&resource->b.b.b.reference, 1);
|
||||
resource->b.b.b.screen = screen;
|
||||
resource->bo = bo;
|
||||
rtex->pitch_override = pitch_in_bytes_override;
|
||||
rtex->real_format = base->format;
|
||||
|
||||
|
|
@ -459,23 +456,27 @@ r600_texture_create_object(struct pipe_screen *screen,
|
|||
rtex->size = stencil_offset + rtex->stencil->size;
|
||||
}
|
||||
|
||||
resource->size = rtex->size;
|
||||
|
||||
/* Now create the backing buffer. */
|
||||
if (!resource->bo && alloc_bo) {
|
||||
if (!buf && alloc_bo) {
|
||||
struct pipe_resource *ptex = &rtex->resource.b.b.b;
|
||||
unsigned base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
|
||||
|
||||
resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage);
|
||||
if (!resource->bo) {
|
||||
if (!r600_init_resource(rscreen, resource, rtex->size, base_align, base->bind, base->usage)) {
|
||||
pipe_resource_reference((struct pipe_resource**)&rtex->stencil, NULL);
|
||||
FREE(rtex);
|
||||
return NULL;
|
||||
}
|
||||
} else if (buf) {
|
||||
resource->buf = buf;
|
||||
resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
|
||||
resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
|
||||
}
|
||||
|
||||
if (rtex->stencil)
|
||||
rtex->stencil->resource.bo = rtex->resource.bo;
|
||||
if (rtex->stencil) {
|
||||
rtex->stencil->resource.buf = rtex->resource.buf;
|
||||
rtex->stencil->resource.cs_buf = rtex->resource.cs_buf;
|
||||
rtex->stencil->resource.domains = rtex->resource.domains;
|
||||
}
|
||||
return rtex;
|
||||
}
|
||||
|
||||
|
|
@ -540,28 +541,36 @@ static void r600_surface_destroy(struct pipe_context *pipe,
|
|||
FREE(surface);
|
||||
}
|
||||
|
||||
|
||||
struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
|
||||
const struct pipe_resource *templ,
|
||||
struct winsys_handle *whandle)
|
||||
{
|
||||
struct radeon *rw = ((struct r600_screen*)screen)->radeon;
|
||||
struct r600_bo *bo = NULL;
|
||||
struct r600_screen *rscreen = (struct r600_screen*)screen;
|
||||
struct pb_buffer *buf = NULL;
|
||||
unsigned stride = 0;
|
||||
unsigned array_mode = 0;
|
||||
enum radeon_bo_layout micro, macro;
|
||||
|
||||
/* Support only 2D textures without mipmaps */
|
||||
if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
|
||||
templ->depth0 != 1 || templ->last_level != 0)
|
||||
return NULL;
|
||||
|
||||
bo = r600_bo_handle(rw, whandle, &stride, &array_mode);
|
||||
if (bo == NULL) {
|
||||
buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, NULL);
|
||||
if (!buf)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
rscreen->ws->buffer_get_tiling(buf, µ, ¯o);
|
||||
|
||||
if (macro == RADEON_LAYOUT_TILED)
|
||||
array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
|
||||
else if (micro == RADEON_LAYOUT_TILED)
|
||||
array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
|
||||
else
|
||||
array_mode = 0;
|
||||
|
||||
return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
|
||||
stride, 0, bo, FALSE);
|
||||
stride, 0, buf, FALSE);
|
||||
}
|
||||
|
||||
int r600_texture_depth_flush(struct pipe_context *ctx,
|
||||
|
|
@ -748,28 +757,27 @@ void* r600_texture_transfer_map(struct pipe_context *ctx,
|
|||
{
|
||||
struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
|
||||
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
|
||||
struct r600_bo *bo;
|
||||
struct pb_buffer *buf;
|
||||
enum pipe_format format = transfer->resource->format;
|
||||
struct radeon *radeon = rctx->screen->radeon;
|
||||
unsigned offset = 0;
|
||||
char *map;
|
||||
|
||||
if (rtransfer->staging_texture) {
|
||||
bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
|
||||
buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
|
||||
} else {
|
||||
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
|
||||
|
||||
if (rtex->flushed_depth_texture)
|
||||
bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
|
||||
buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
|
||||
else
|
||||
bo = ((struct r600_resource *)transfer->resource)->bo;
|
||||
buf = ((struct r600_resource *)transfer->resource)->buf;
|
||||
|
||||
offset = rtransfer->offset +
|
||||
transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
|
||||
transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
|
||||
}
|
||||
|
||||
if (!(map = r600_bo_map(radeon, bo, rctx->ctx.cs, transfer->usage))) {
|
||||
if (!(map = rctx->ws->buffer_map(buf, rctx->ctx.cs, transfer->usage))) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
@ -780,21 +788,21 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx,
|
|||
struct pipe_transfer* transfer)
|
||||
{
|
||||
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
|
||||
struct radeon *radeon = ((struct r600_screen*)ctx->screen)->radeon;
|
||||
struct r600_bo *bo;
|
||||
struct r600_pipe_context *rctx = (struct r600_pipe_context*)ctx;
|
||||
struct pb_buffer *buf;
|
||||
|
||||
if (rtransfer->staging_texture) {
|
||||
bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
|
||||
buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
|
||||
} else {
|
||||
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
|
||||
|
||||
if (rtex->flushed_depth_texture) {
|
||||
bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
|
||||
buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
|
||||
} else {
|
||||
bo = ((struct r600_resource *)transfer->resource)->bo;
|
||||
buf = ((struct r600_resource *)transfer->resource)->buf;
|
||||
}
|
||||
}
|
||||
r600_bo_unmap(radeon, bo);
|
||||
rctx->ws->buffer_unmap(buf);
|
||||
}
|
||||
|
||||
void r600_init_surface_functions(struct r600_pipe_context *r600)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,4 @@
|
|||
C_SOURCES := \
|
||||
evergreen_hw_context.c \
|
||||
r600_bo.c \
|
||||
r600_drm.c \
|
||||
r600_hw_context.c
|
||||
|
||||
|
|
|
|||
|
|
@ -897,12 +897,13 @@ static int evergreen_loop_const_init(struct r600_context *ctx, u32 offset)
|
|||
return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, EVERGREEN_LOOP_CONST_OFFSET);
|
||||
}
|
||||
|
||||
int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon)
|
||||
int evergreen_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon)
|
||||
{
|
||||
int r;
|
||||
|
||||
memset(ctx, 0, sizeof(struct r600_context));
|
||||
ctx->radeon = radeon;
|
||||
ctx->screen = screen;
|
||||
|
||||
LIST_INITHEAD(&ctx->query_list);
|
||||
|
||||
|
|
@ -1205,8 +1206,8 @@ void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *dr
|
|||
|
||||
void evergreen_context_flush_dest_caches(struct r600_context *ctx)
|
||||
{
|
||||
struct r600_bo *cb[12];
|
||||
struct r600_bo *db;
|
||||
struct r600_resource *cb[12];
|
||||
struct r600_resource *db;
|
||||
|
||||
if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
|
||||
return;
|
||||
|
|
|
|||
|
|
@ -1,131 +0,0 @@
|
|||
/*
|
||||
* Copyright 2010 Dave Airlie
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* on the rights to use, copy, modify, merge, publish, distribute, sub
|
||||
* license, and/or sell copies of the Software, and to permit persons to whom
|
||||
* the Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Dave Airlie
|
||||
*/
|
||||
#include "r600_priv.h"
|
||||
#include "r600d.h"
|
||||
#include "state_tracker/drm_driver.h"
|
||||
|
||||
struct r600_bo *r600_bo(struct radeon *radeon,
|
||||
unsigned size, unsigned alignment,
|
||||
unsigned binding, unsigned usage)
|
||||
{
|
||||
struct r600_bo *bo;
|
||||
struct pb_buffer *pb;
|
||||
uint32_t initial_domain, domains;
|
||||
|
||||
/* Staging resources particpate in transfers and blits only
|
||||
* and are used for uploads and downloads from regular
|
||||
* resources. We generate them internally for some transfers.
|
||||
*/
|
||||
if (usage == PIPE_USAGE_STAGING) {
|
||||
domains = RADEON_DOMAIN_GTT;
|
||||
initial_domain = RADEON_DOMAIN_GTT;
|
||||
} else {
|
||||
domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
|
||||
|
||||
switch(usage) {
|
||||
case PIPE_USAGE_DYNAMIC:
|
||||
case PIPE_USAGE_STREAM:
|
||||
case PIPE_USAGE_STAGING:
|
||||
initial_domain = RADEON_DOMAIN_GTT;
|
||||
break;
|
||||
case PIPE_USAGE_DEFAULT:
|
||||
case PIPE_USAGE_STATIC:
|
||||
case PIPE_USAGE_IMMUTABLE:
|
||||
default:
|
||||
initial_domain = RADEON_DOMAIN_VRAM;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pb = radeon->ws->buffer_create(radeon->ws, size, alignment, binding, initial_domain);
|
||||
if (!pb) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bo = calloc(1, sizeof(struct r600_bo));
|
||||
bo->domains = domains;
|
||||
bo->buf = pb;
|
||||
bo->cs_buf = radeon->ws->buffer_get_cs_handle(pb);
|
||||
|
||||
pipe_reference_init(&bo->reference, 1);
|
||||
return bo;
|
||||
}
|
||||
|
||||
struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
|
||||
unsigned *stride, unsigned *array_mode)
|
||||
{
|
||||
struct pb_buffer *pb;
|
||||
struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
|
||||
|
||||
pb = bo->buf = radeon->ws->buffer_from_handle(radeon->ws, whandle, stride, NULL);
|
||||
if (!pb) {
|
||||
free(bo);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pipe_reference_init(&bo->reference, 1);
|
||||
bo->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
|
||||
bo->cs_buf = radeon->ws->buffer_get_cs_handle(pb);
|
||||
|
||||
if (stride)
|
||||
*stride = whandle->stride;
|
||||
|
||||
if (array_mode) {
|
||||
enum radeon_bo_layout micro, macro;
|
||||
|
||||
radeon->ws->buffer_get_tiling(bo->buf, µ, ¯o);
|
||||
|
||||
if (macro == RADEON_LAYOUT_TILED)
|
||||
*array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
|
||||
else if (micro == RADEON_LAYOUT_TILED)
|
||||
*array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
|
||||
else
|
||||
*array_mode = 0;
|
||||
}
|
||||
return bo;
|
||||
}
|
||||
|
||||
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, struct radeon_winsys_cs *cs, unsigned usage)
|
||||
{
|
||||
return radeon->ws->buffer_map(bo->buf, cs, usage);
|
||||
}
|
||||
|
||||
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
|
||||
{
|
||||
radeon->ws->buffer_unmap(bo->buf);
|
||||
}
|
||||
|
||||
void r600_bo_destroy(struct r600_bo *bo)
|
||||
{
|
||||
pb_reference(&bo->buf, NULL);
|
||||
free(bo);
|
||||
}
|
||||
|
||||
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
|
||||
unsigned stride, struct winsys_handle *whandle)
|
||||
{
|
||||
return radeon->ws->buffer_get_handle(bo->buf, stride, whandle);
|
||||
}
|
||||
|
|
@ -24,6 +24,7 @@
|
|||
* Jerome Glisse
|
||||
*/
|
||||
#include "r600_priv.h"
|
||||
#include "r600_pipe.h"
|
||||
#include "r600d.h"
|
||||
#include "util/u_memory.h"
|
||||
#include <errno.h>
|
||||
|
|
@ -33,8 +34,8 @@
|
|||
/* Get backends mask */
|
||||
void r600_get_backend_mask(struct r600_context *ctx)
|
||||
{
|
||||
struct r600_bo * buffer;
|
||||
u32 * results;
|
||||
struct r600_resource *buffer;
|
||||
u32 *results;
|
||||
unsigned num_backends = ctx->radeon->info.r600_num_backends;
|
||||
unsigned i, mask = 0;
|
||||
|
||||
|
|
@ -66,16 +67,17 @@ void r600_get_backend_mask(struct r600_context *ctx)
|
|||
/* otherwise backup path for older kernels */
|
||||
|
||||
/* create buffer for event data */
|
||||
buffer = r600_bo(ctx->radeon, ctx->max_db*16, 1, 0,
|
||||
PIPE_USAGE_STAGING);
|
||||
buffer = (struct r600_resource*)
|
||||
pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
|
||||
PIPE_USAGE_STAGING, ctx->max_db*16);
|
||||
if (!buffer)
|
||||
goto err;
|
||||
|
||||
/* initialize buffer with zeroes */
|
||||
results = r600_bo_map(ctx->radeon, buffer, ctx->cs, PIPE_TRANSFER_WRITE);
|
||||
results = ctx->screen->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
|
||||
if (results) {
|
||||
memset(results, 0, ctx->max_db * 4 * 4);
|
||||
r600_bo_unmap(ctx->radeon, buffer);
|
||||
ctx->screen->ws->buffer_unmap(buffer->buf);
|
||||
|
||||
/* emit EVENT_WRITE for ZPASS_DONE */
|
||||
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
|
||||
|
|
@ -90,18 +92,18 @@ void r600_get_backend_mask(struct r600_context *ctx)
|
|||
r600_context_flush(ctx, 0);
|
||||
|
||||
/* analyze results */
|
||||
results = r600_bo_map(ctx->radeon, buffer, ctx->cs, PIPE_TRANSFER_READ);
|
||||
results = ctx->screen->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
|
||||
if (results) {
|
||||
for(i = 0; i < ctx->max_db; i++) {
|
||||
/* at least highest bit will be set if backend is used */
|
||||
if (results[i*4 + 1])
|
||||
mask |= (1<<i);
|
||||
}
|
||||
r600_bo_unmap(ctx->radeon, buffer);
|
||||
ctx->screen->ws->buffer_unmap(buffer->buf);
|
||||
}
|
||||
}
|
||||
|
||||
r600_bo_reference(&buffer, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
|
||||
|
||||
if (mask != 0) {
|
||||
ctx->backend_mask = mask;
|
||||
|
|
@ -736,7 +738,7 @@ static void r600_free_resource_range(struct r600_context *ctx, struct r600_range
|
|||
block = range->blocks[i];
|
||||
if (block) {
|
||||
for (int k = 1; k <= block->nbo; k++)
|
||||
r600_bo_reference(&block->reloc[k].bo, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
|
||||
free(block);
|
||||
}
|
||||
}
|
||||
|
|
@ -761,7 +763,7 @@ void r600_context_fini(struct r600_context *ctx)
|
|||
range->blocks[CTX_BLOCK_ID(offset)] = NULL;
|
||||
}
|
||||
for (int k = 1; k <= block->nbo; k++) {
|
||||
r600_bo_reference(&block->reloc[k].bo, NULL);
|
||||
pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
|
||||
}
|
||||
free(block);
|
||||
}
|
||||
|
|
@ -826,12 +828,13 @@ int r600_setup_block_table(struct r600_context *ctx)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int r600_context_init(struct r600_context *ctx, struct radeon *radeon)
|
||||
int r600_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon)
|
||||
{
|
||||
int r;
|
||||
|
||||
memset(ctx, 0, sizeof(struct r600_context));
|
||||
ctx->radeon = radeon;
|
||||
ctx->screen = screen;
|
||||
|
||||
LIST_INITHEAD(&ctx->query_list);
|
||||
|
||||
|
|
@ -949,11 +952,11 @@ void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags)
|
|||
}
|
||||
|
||||
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
|
||||
unsigned flush_mask, struct r600_bo *bo)
|
||||
unsigned flush_mask, struct r600_resource *bo)
|
||||
{
|
||||
/* if bo has already been flushed */
|
||||
if (!(~bo->last_flush & flush_flags)) {
|
||||
bo->last_flush &= flush_mask;
|
||||
if (!(~bo->cs_buf->last_flush & flush_flags)) {
|
||||
bo->cs_buf->last_flush &= flush_mask;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -962,7 +965,7 @@ void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
|
|||
G_0085F0_DB_ACTION_ENA(flush_flags))) {
|
||||
if (ctx->flags & R600_CONTEXT_CHECK_EVENT_FLUSH) {
|
||||
/* the rv670 seems to fail fbo-generatemipmap unless we flush the CB1 dest base ena */
|
||||
if ((bo->binding & BO_BOUND_TEXTURE) &&
|
||||
if ((bo->cs_buf->binding & BO_BOUND_TEXTURE) &&
|
||||
(flush_flags & S_0085F0_CB_ACTION_ENA(1))) {
|
||||
if ((ctx->radeon->family == CHIP_RV670) ||
|
||||
(ctx->radeon->family == CHIP_RS780) ||
|
||||
|
|
@ -988,7 +991,7 @@ void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
|
|||
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
|
||||
ctx->pm4[ctx->pm4_cdwords++] = r600_context_bo_reloc(ctx, bo, RADEON_USAGE_WRITE);
|
||||
}
|
||||
bo->last_flush = (bo->last_flush | flush_flags) & flush_mask;
|
||||
bo->cs_buf->last_flush = (bo->cs_buf->last_flush | flush_flags) & flush_mask;
|
||||
}
|
||||
|
||||
void r600_context_reg(struct r600_context *ctx,
|
||||
|
|
@ -1066,7 +1069,7 @@ void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_stat
|
|||
if (block->pm4_bo_index[id]) {
|
||||
/* find relocation */
|
||||
reloc_id = block->pm4_bo_index[id];
|
||||
r600_bo_reference(&block->reloc[reloc_id].bo, reg->bo);
|
||||
pipe_resource_reference((struct pipe_resource**)&block->reloc[reloc_id].bo, ®->bo->b.b.b);
|
||||
block->reloc[reloc_id].bo_usage = reg->bo_usage;
|
||||
/* always force dirty for relocs for now */
|
||||
dirty |= R600_BLOCK_STATUS_DIRTY;
|
||||
|
|
@ -1103,10 +1106,10 @@ void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_
if (state == NULL) {
block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_RESOURCE_DIRTY);
if (block->reloc[1].bo)
block->reloc[1].bo->binding &= ~BO_BOUND_TEXTURE;
block->reloc[1].bo->cs_buf->binding &= ~BO_BOUND_TEXTURE;

r600_bo_reference(&block->reloc[1].bo, NULL);
r600_bo_reference(&block->reloc[2].bo, NULL);
pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, NULL);
pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
LIST_DELINIT(&block->list);
LIST_DELINIT(&block->enable_list);
return;

@ -1140,16 +1143,16 @@ void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_
/* VERTEX RESOURCE, we preted there is 2 bo to relocate so
* we have single case btw VERTEX & TEXTURE resource
*/
r600_bo_reference(&block->reloc[1].bo, state->bo[0]);
pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b.b);
block->reloc[1].bo_usage = state->bo_usage[0];
r600_bo_reference(&block->reloc[2].bo, NULL);
pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
} else {
/* TEXTURE RESOURCE */
r600_bo_reference(&block->reloc[1].bo, state->bo[0]);
pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b.b);
block->reloc[1].bo_usage = state->bo_usage[0];
r600_bo_reference(&block->reloc[2].bo, state->bo[1]);
pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, &state->bo[1]->b.b.b);
block->reloc[2].bo_usage = state->bo_usage[1];
state->bo[0]->binding |= BO_BOUND_TEXTURE;
state->bo[0]->cs_buf->binding |= BO_BOUND_TEXTURE;
}

if (is_vertex)
@ -1265,7 +1268,7 @@ void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r60
r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset)
struct r600_resource *r600_context_reg_bo(struct r600_context *ctx, unsigned offset)
{
struct r600_range *range;
struct r600_block *block;

@ -1372,8 +1375,8 @@ void r600_context_block_resource_emit_dirty(struct r600_context *ctx, struct r60

void r600_context_flush_dest_caches(struct r600_context *ctx)
{
struct r600_bo *cb[8];
struct r600_bo *db;
struct r600_resource *cb[8];
struct r600_resource *db;
int i;

if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))

@ -1511,8 +1514,8 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)

/* restart */
for (int i = 0; i < ctx->creloc; i++) {
ctx->bo[i]->last_flush = 0;
r600_bo_reference(&ctx->bo[i], NULL);
ctx->bo[i]->cs_buf->last_flush = 0;
pipe_resource_reference((struct pipe_resource**)&ctx->bo[i], NULL);
}
ctx->creloc = 0;
ctx->pm4_dirty_cdwords = 0;

@ -1545,7 +1548,7 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)
}
}

void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence_bo, unsigned offset, unsigned value)
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
unsigned ndwords = 10;
@ -1573,9 +1576,9 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
u32 *results, *current_result;

if (wait)
results = r600_bo_map(ctx->radeon, query->buffer, ctx->cs, PIPE_TRANSFER_READ);
results = ctx->screen->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
else
results = r600_bo_map(ctx->radeon, query->buffer, ctx->cs, PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_READ);
results = ctx->screen->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_READ);
if (!results)
return FALSE;

@ -1597,7 +1600,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
}

query->results_start = query->results_end;
r600_bo_unmap(ctx->radeon, query->buffer);
ctx->screen->ws->buffer_unmap(query->buffer->buf);
return TRUE;
}
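Query readback now goes straight through the winsys function table, ctx->screen->ws->buffer_map()/buffer_unmap(), instead of the r600_bo_map()/r600_bo_unmap() wrappers. The sketch below only copies the call shape visible in these hunks onto stand-in types; the real struct radeon_winsys has a much larger interface and stricter parameter types.

struct fake_winsys {
	void *(*buffer_map)(void *buf, void *cs, unsigned usage);
	void  (*buffer_unmap)(void *buf);
};

/* Map the query buffer, copy 'count' result dwords out, unmap again.
 * Returns 0 when the (possibly non-blocking) map fails, as in the hunk. */
static int read_query_dwords(struct fake_winsys *ws, void *buf, void *cs,
                             unsigned usage, unsigned *dst, unsigned count)
{
	unsigned *results = ws->buffer_map(buf, cs, usage);

	if (!results)
		return 0;
	for (unsigned i = 0; i < count; i++)
		dst[i] = results[i];
	ws->buffer_unmap(buf);
	return 1;
}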
@ -1649,7 +1652,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
u32 *results;
int i;

results = r600_bo_map(ctx->radeon, query->buffer, ctx->cs, PIPE_TRANSFER_WRITE);
results = ctx->screen->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
if (results) {
results = (u32*)((char*)results + query->results_end);
memset(results, 0, query->result_size);

@ -1661,7 +1664,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
results[(i * 4)+3] = 0x80000000;
}
}
r600_bo_unmap(ctx->radeon, query->buffer);
ctx->screen->ws->buffer_unmap(query->buffer->buf);
}
}
@ -1799,8 +1802,8 @@ struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
query->buffer = r600_bo(ctx->radeon, query->buffer_size, 1, 0,
PIPE_USAGE_STAGING);
query->buffer = (struct r600_resource*)
pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, query->buffer_size);
if (!query->buffer) {
free(query);
return NULL;
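The query buffer is now allocated with the generic pipe_buffer_create() helper against the screen, and the returned pipe_resource is cast back to r600_resource, which works because the driver's resource type embeds the pipe_resource base. A self-contained toy version of that pattern is sketched below; the flag values and struct layouts are placeholders, not the real PIPE_* definitions.

#include <stdlib.h>

enum { FAKE_BIND_CUSTOM = 1, FAKE_USAGE_STAGING = 2 };        /* placeholder flag values */

struct fake_pipe_resource { unsigned bind, usage, width0; };
struct fake_r600_resource { struct fake_pipe_resource b; };   /* base first => downcast below is valid */

/* Assumed shape of the creation helper: in Gallium, pipe_buffer_create()
 * forwards to the screen's resource-creation hook; here it simply allocates
 * the driver struct directly so the sketch stays self-contained. */
static struct fake_pipe_resource *
fake_buffer_create(unsigned bind, unsigned usage, unsigned size)
{
	struct fake_r600_resource *res = calloc(1, sizeof(*res));

	if (!res)
		return NULL;
	res->b.bind = bind;
	res->b.usage = usage;
	res->b.width0 = size;         /* a buffer's size is carried in width0 */
	return &res->b;
}

static struct fake_r600_resource *create_query_buffer(unsigned size)
{
	/* mirrors: query->buffer = (struct r600_resource*)pipe_buffer_create(...) */
	return (struct fake_r600_resource *)
		fake_buffer_create(FAKE_BIND_CUSTOM, FAKE_USAGE_STAGING, size);
}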
@ -1813,7 +1816,7 @@ struct r600_query *r600_context_query_create(struct r600_context *ctx, unsigned

void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query)
{
r600_bo_reference(&query->buffer, NULL);
pipe_resource_reference((struct pipe_resource**)&query->buffer, NULL);
LIST_DELINIT(&query->list);
free(query);
}
@ -59,22 +59,12 @@ struct r600_reg {

#define BO_BOUND_TEXTURE 1

struct r600_bo {
struct pipe_reference reference; /* this must be the first member for the r600_bo_reference inline to work */
/* DO NOT MOVE THIS ^ */
struct pb_buffer *buf;
struct radeon_winsys_cs_handle *cs_buf;
unsigned domains;
unsigned last_flush;
unsigned binding;
};

/*
* r600_hw_context.c
*/
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
unsigned flush_mask, struct r600_bo *rbo);
struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);
unsigned flush_mask, struct r600_resource *rbo);
struct r600_resource *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
unsigned opcode, unsigned offset_base);
void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, struct r600_block *block);
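With struct r600_bo deleted here, the earlier hunks in this diff show where its members ended up: buf, cs_buf and domains are accessed directly on r600_resource (query->buffer->buf, bo->cs_buf, rbo->domains), while last_flush and binding reappear on the winsys objects in the final two hunks. Below is a hypothetical, heavily trimmed view of the resulting driver-side layout, for orientation only; the authoritative definitions live in the r600 headers.

struct fake_cs_handle;                      /* winsys handle, sketched after the last hunk */

struct fake_r600_resource {
	struct { unsigned width0; } b;      /* reference-counted pipe_resource base (simplified) */
	void *buf;                          /* pb_buffer, formerly r600_bo::buf */
	struct fake_cs_handle *cs_buf;      /* formerly r600_bo::cs_buf */
	unsigned domains;                   /* formerly r600_bo::domains */
};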
@ -89,7 +79,7 @@ void r600_context_reg(struct r600_context *ctx,
void r600_init_cs(struct r600_context *ctx);
int r600_resource_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride, struct r600_reg *reg, int nreg, unsigned offset_base);

static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_bo *rbo,
static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
enum radeon_bo_usage usage)
{
enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? rbo->domains : 0;

@ -104,7 +94,7 @@ static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r6
if (reloc_index >= ctx->creloc)
ctx->creloc = reloc_index+1;

r600_bo_reference(&ctx->bo[reloc_index], rbo);
pipe_resource_reference((struct pipe_resource**)&ctx->bo[reloc_index], &rbo->b.b.b);
return reloc_index * 4;
}
@ -46,6 +46,10 @@ struct radeon_bo_desc {

struct radeon_bo {
struct pb_buffer base;

unsigned last_flush;
unsigned binding;

struct radeon_bomgr *mgr;
struct radeon_drm_winsys *rws;
@ -68,7 +68,15 @@ enum radeon_bo_usage { /* bitfield */
};

struct winsys_handle;
struct radeon_winsys_cs_handle; /* for write_reloc etc. */

struct radeon_winsys_cs_handle { /* for write_reloc etc. */
struct {
struct pb_buffer base;
} _private;

unsigned last_flush;
unsigned binding;
};

struct radeon_winsys_cs {
unsigned cdw; /* Number of used dwords. */
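The last two hunks give radeon_bo and the newly fleshed-out radeon_winsys_cs_handle the same leading layout: a pb_buffer (wrapped in _private on the handle side so the driver treats it as opaque) followed by last_flush and binding. That matching layout is presumably what lets the winsys hand the driver a radeon_winsys_cs_handle pointer that is really its own radeon_bo; the cast below illustrates that assumption and is not code from the commit.

struct fake_pb_buffer { unsigned size; };    /* pb_buffer stand-in */

struct fake_radeon_bo {                      /* winsys-private view */
	struct fake_pb_buffer base;          /* must remain the first member */
	unsigned last_flush;
	unsigned binding;
	/* ... mgr, rws, ... */
};

struct fake_cs_handle {                      /* view exported to the driver */
	struct { struct fake_pb_buffer base; } _private;   /* opaque to the driver */
	unsigned last_flush;
	unsigned binding;
};

static struct fake_cs_handle *export_handle(struct fake_radeon_bo *bo)
{
	/* Relies on both structs keeping an identical leading layout; that is a
	 * convention documented by the '_private' wrapper, not something the
	 * compiler enforces. */
	return (struct fake_cs_handle *)bo;
}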