[g3dvl] move video buffer creation out of video context

Christian König 2011-07-08 16:56:11 +02:00
parent 3bb33c911b
commit 4e837f557b
11 changed files with 121 additions and 94 deletions
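
In short, video buffer allocation moves from the video context to the regular pipe context: pipe_video_context::create_buffer goes away, pipe_context gains a create_video_buffer hook, and drivers can point the new hook at the shared vl_video_buffer_create helper. A rough caller-side sketch of the change; the wrapper function, its parameters and the include list are illustrative, only the hook itself comes from this commit:

#include "pipe/p_context.h"
#include "pipe/p_video_context.h"

/* Before this commit the equivalent call was:
 *   buf = vpipe->create_buffer(vpipe, PIPE_FORMAT_NV12, chroma, width, height);
 * Now the ordinary pipe context owns the hook instead.
 */
static struct pipe_video_buffer *
alloc_decode_target(struct pipe_context *pipe,
                    enum pipe_video_chroma_format chroma,
                    unsigned width, unsigned height)
{
   return pipe->create_video_buffer(pipe, PIPE_FORMAT_NV12, chroma, width, height);
}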


@@ -74,46 +74,6 @@ vl_context_create_decoder(struct pipe_video_context *context,
return NULL;
}
static struct pipe_video_buffer *
vl_context_create_buffer(struct pipe_video_context *context,
enum pipe_format buffer_format,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height)
{
struct vl_context *ctx = (struct vl_context*)context;
const enum pipe_format *resource_formats;
struct pipe_video_buffer *result;
unsigned buffer_width, buffer_height;
bool pot_buffers;
assert(context);
assert(width > 0 && height > 0);
pot_buffers = !ctx->base.screen->get_video_param
(
ctx->base.screen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_CAP_NPOT_TEXTURES
);
resource_formats = vl_video_buffer_formats(ctx->pipe->screen, buffer_format);
if (!resource_formats)
return NULL;
buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
result = vl_video_buffer_init(context, ctx->pipe,
buffer_width, buffer_height, 1,
chroma_format,
resource_formats,
PIPE_USAGE_STATIC);
if (result) // TODO move format handling into vl_video_buffer
result->buffer_format = buffer_format;
return result;
}
struct pipe_video_context *
vl_create_context(struct pipe_context *pipe)
{
@@ -128,7 +88,6 @@ vl_create_context(struct pipe_context *pipe)
ctx->base.destroy = vl_context_destroy;
ctx->base.create_decoder = vl_context_create_decoder;
ctx->base.create_buffer = vl_context_create_buffer;
ctx->pipe = pipe;


@@ -92,11 +92,14 @@ init_zscan_buffer(struct vl_mpeg12_buffer *buffer)
dec = (struct vl_mpeg12_decoder*)buffer->base.decoder;
formats[0] = formats[1] = formats[2] = dec->zscan_source_format;
buffer->zscan_source = vl_video_buffer_init(dec->base.context, dec->pipe,
dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
1, PIPE_VIDEO_CHROMA_FORMAT_444,
formats, PIPE_USAGE_STATIC);
buffer->zscan_source = vl_video_buffer_create_ex
(
dec->pipe,
dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
1, PIPE_VIDEO_CHROMA_FORMAT_444, formats, PIPE_USAGE_STATIC
);
if (!buffer->zscan_source)
goto error_source;
@@ -718,19 +721,22 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
nr_of_idct_render_targets = 1;
formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
dec->idct_source = vl_video_buffer_init(dec->base.context, dec->pipe,
dec->base.width / 4, dec->base.height, 1,
dec->base.chroma_format,
formats, PIPE_USAGE_STATIC);
dec->idct_source = vl_video_buffer_create_ex
(
dec->pipe, dec->base.width / 4, dec->base.height, 1,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
if (!dec->idct_source)
goto error_idct_source;
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
dec->mc_source = vl_video_buffer_init(dec->base.context, dec->pipe,
dec->base.width / nr_of_idct_render_targets,
dec->base.height / 4, nr_of_idct_render_targets,
dec->base.chroma_format,
formats, PIPE_USAGE_STATIC);
dec->mc_source = vl_video_buffer_create_ex
(
dec->pipe, dec->base.width / nr_of_idct_render_targets,
dec->base.height / 4, nr_of_idct_render_targets,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
if (!dec->mc_source)
goto error_mc_source;
@@ -772,11 +778,12 @@ init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_
enum pipe_format formats[3];
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
dec->mc_source = vl_video_buffer_init(dec->base.context, dec->pipe,
dec->base.width, dec->base.height, 1,
dec->base.chroma_format,
formats, PIPE_USAGE_STATIC);
dec->mc_source = vl_video_buffer_create_ex
(
dec->pipe, dec->base.width, dec->base.height, 1,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
return dec->mc_source != NULL;
}


@@ -114,7 +114,7 @@ vl_video_buffer_sampler_view_planes(struct pipe_video_buffer *buffer)
assert(buf);
pipe = buf->pipe;
pipe = buf->base.context;
for (i = 0; i < buf->num_planes; ++i ) {
if (!buf->sampler_view_planes[i]) {
@@ -149,7 +149,7 @@ vl_video_buffer_sampler_view_components(struct pipe_video_buffer *buffer)
assert(buf);
pipe = buf->pipe;
pipe = buf->base.context;
for (component = 0, i = 0; i < buf->num_planes; ++i ) {
unsigned nr_components = util_format_get_nr_components(buf->resources[i]->format);
@@ -188,7 +188,7 @@ vl_video_buffer_surfaces(struct pipe_video_buffer *buffer)
assert(buf);
pipe = buf->pipe;
pipe = buf->base.context;
for (i = 0; i < buf->num_planes; ++i ) {
if (!buf->surfaces[i]) {
@@ -211,21 +211,60 @@ error:
}
struct pipe_video_buffer *
vl_video_buffer_init(struct pipe_video_context *context,
struct pipe_context *pipe,
unsigned width, unsigned height, unsigned depth,
enum pipe_video_chroma_format chroma_format,
const enum pipe_format resource_formats[VL_MAX_PLANES],
unsigned usage)
vl_video_buffer_create(struct pipe_context *pipe,
enum pipe_format buffer_format,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height)
{
const enum pipe_format *resource_formats;
struct pipe_video_buffer *result;
unsigned buffer_width, buffer_height;
bool pot_buffers;
assert(pipe);
assert(width > 0 && height > 0);
pot_buffers = !pipe->screen->get_video_param
(
pipe->screen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_CAP_NPOT_TEXTURES
);
resource_formats = vl_video_buffer_formats(pipe->screen, buffer_format);
if (!resource_formats)
return NULL;
buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
result = vl_video_buffer_create_ex
(
pipe, buffer_width, buffer_height, 1,
chroma_format, resource_formats, PIPE_USAGE_STATIC
);
if (result)
result->buffer_format = buffer_format;
return result;
}
struct pipe_video_buffer *
vl_video_buffer_create_ex(struct pipe_context *pipe,
unsigned width, unsigned height, unsigned depth,
enum pipe_video_chroma_format chroma_format,
const enum pipe_format resource_formats[VL_MAX_PLANES],
unsigned usage)
{
struct vl_video_buffer *buffer;
struct pipe_resource templ;
unsigned i;
assert(context && pipe);
assert(pipe);
buffer = CALLOC_STRUCT(vl_video_buffer);
buffer->base.context = pipe;
buffer->base.destroy = vl_video_buffer_destroy;
buffer->base.get_sampler_view_planes = vl_video_buffer_sampler_view_planes;
buffer->base.get_sampler_view_components = vl_video_buffer_sampler_view_components;
@@ -233,7 +272,6 @@ vl_video_buffer_init(struct pipe_video_context *context,
buffer->base.chroma_format = chroma_format;
buffer->base.width = width;
buffer->base.height = height;
buffer->pipe = pipe;
buffer->num_planes = 1;
memset(&templ, 0, sizeof(templ));
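
For reference, the new vl_video_buffer_create rounds the requested size before allocating: up to the next power of two when the screen reports no NPOT texture support, otherwise up to the next macroblock boundary. A standalone sketch of that arithmetic, assuming the usual 16x16 macroblock size for MACROBLOCK_WIDTH/HEIGHT and reimplementing the util helpers locally:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for util_next_power_of_two() and align(). */
static unsigned next_power_of_two(unsigned x)
{
   unsigned p = 1;
   while (p < x)
      p <<= 1;
   return p;
}

static unsigned align_up(unsigned x, unsigned a)
{
   return (x + a - 1) / a * a;
}

int main(void)
{
   /* pot_buffers is true when PIPE_VIDEO_CAP_NPOT_TEXTURES reports 0. */
   bool pot_buffers = false;
   unsigned width = 1920, height = 1080;

   unsigned bw = pot_buffers ? next_power_of_two(width)  : align_up(width, 16);
   unsigned bh = pot_buffers ? next_power_of_two(height) : align_up(height, 16);

   /* NPOT-capable screen: 1920x1080 -> 1920x1088; POT-only: 2048x2048. */
   printf("%ux%u -> %ux%u\n", width, height, bw, bh);
   return 0;
}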


@@ -41,7 +41,6 @@
struct vl_video_buffer
{
struct pipe_video_buffer base;
struct pipe_context *pipe;
unsigned num_planes;
struct pipe_resource *resources[VL_MAX_PLANES];
struct pipe_sampler_view *sampler_view_planes[VL_MAX_PLANES];
@@ -63,15 +62,24 @@ boolean
vl_video_buffer_is_format_supported(struct pipe_screen *screen,
enum pipe_format format,
enum pipe_video_profile profile);
/**
* initialize a buffer, creating its resources
* creates a video buffer, can be used as a standard implementation for pipe->create_video_buffer
*/
struct pipe_video_buffer *
vl_video_buffer_init(struct pipe_video_context *context,
struct pipe_context *pipe,
unsigned width, unsigned height, unsigned depth,
enum pipe_video_chroma_format chroma_format,
const enum pipe_format resource_formats[VL_MAX_PLANES],
unsigned usage);
vl_video_buffer_create(struct pipe_context *pipe,
enum pipe_format buffer_format,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height);
/**
* extended create function, gets depth, usage and formats for each plane separately
*/
struct pipe_video_buffer *
vl_video_buffer_create_ex(struct pipe_context *pipe,
unsigned width, unsigned height, unsigned depth,
enum pipe_video_chroma_format chroma_format,
const enum pipe_format resource_formats[VL_MAX_PLANES],
unsigned usage);
#endif /* vl_ycbcr_buffer_h */
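
The split leaves two entry points in the auxiliary library: vl_video_buffer_create for the common case (single buffer format, POT/macroblock rounding, PIPE_USAGE_STATIC), and vl_video_buffer_create_ex when the caller wants to choose depth, usage and a format per plane itself, as the MPEG-2 decoder does above. A hedged sketch of the extended form, mirroring the init_zscan_buffer/init_idct call sites; the wrapper name and include list are assumptions:

#include "pipe/p_defines.h"
#include "vl/vl_video_buffer.h"

/* Allocate a three-plane scratch buffer with the same explicit format on
 * every plane, the way the mpeg12 decoder now does it. */
static struct pipe_video_buffer *
alloc_scratch(struct pipe_context *pipe, enum pipe_format source_format,
              unsigned width, unsigned height)
{
   enum pipe_format formats[VL_MAX_PLANES];

   formats[0] = formats[1] = formats[2] = source_format;
   return vl_video_buffer_create_ex(pipe, width, height, 1,
                                    PIPE_VIDEO_CHROMA_FORMAT_444,
                                    formats, PIPE_USAGE_STATIC);
}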


@@ -27,6 +27,7 @@
#include "util/u_simple_list.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "vl/vl_video_buffer.h"
#include "r300_cb.h"
#include "r300_context.h"
@@ -436,6 +437,8 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
r300_init_query_functions(r300);
r300_init_state_functions(r300);
r300_init_resource_functions(r300);
r300->context.create_video_buffer = vl_video_buffer_create;
r300->vbuf_mgr = u_vbuf_mgr_create(&r300->context, 1024 * 1024, 16,
PIPE_BIND_VERTEX_BUFFER |


@@ -226,6 +226,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
rctx->context.draw_vbo = r600_draw_vbo;
rctx->context.create_video_buffer = vl_video_buffer_create;
switch (r600_get_family(rctx->radeon)) {
case CHIP_R600:


@@ -37,6 +37,7 @@
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "tgsi/tgsi_exec.h"
#include "vl/vl_video_buffer.h"
#include "sp_clear.h"
#include "sp_context.h"
#include "sp_flush.h"
@@ -258,6 +259,8 @@ softpipe_create_context( struct pipe_screen *screen,
softpipe->pipe.flush = softpipe_flush_wrapped;
softpipe->pipe.render_condition = softpipe_render_condition;
softpipe->pipe.create_video_buffer = vl_video_buffer_create;
/*
* Alloc caches for accessing drawing surfaces and textures.


@@ -59,6 +59,9 @@ struct pipe_vertex_buffer;
struct pipe_vertex_element;
struct pipe_viewport_state;
enum pipe_video_chroma_format;
enum pipe_format;
/**
* Gallium rendering context. Basically:
* - state setting functions
@@ -395,6 +398,14 @@ struct pipe_context {
* Flush any pending framebuffer writes and invalidate texture caches.
*/
void (*texture_barrier)(struct pipe_context *);
/**
* Creates a video buffer as decoding target
*/
struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context,
enum pipe_format buffer_format,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height );
};
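
On the driver side, adopting the hook is a single assignment at context-creation time, exactly as the r300, r600 and softpipe hunks in this commit do; a driver with its own video buffer implementation would install that function instead. A minimal sketch, with the helper function name being illustrative:

#include "pipe/p_context.h"
#include "vl/vl_video_buffer.h"

/* Called from a driver's create_context path; reuses the shared
 * auxiliary implementation as the default. */
static void
wire_video_buffer_hook(struct pipe_context *ctx)
{
   ctx->create_video_buffer = vl_video_buffer_create;
}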


@@ -62,13 +62,6 @@ struct pipe_video_context
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height);
/**
* Creates a buffer as decoding target
*/
struct pipe_video_buffer *(*create_buffer)(struct pipe_video_context *context,
enum pipe_format buffer_format,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height);
};
/**
@@ -166,7 +159,7 @@ struct pipe_video_decode_buffer
*/
struct pipe_video_buffer
{
struct pipe_video_context *context;
struct pipe_context *context;
enum pipe_format buffer_format;
enum pipe_video_chroma_format chroma_format;


@@ -70,9 +70,9 @@ vlVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type,
}
p_surf->device = dev;
p_surf->video_buffer = dev->context->vpipe->create_buffer
p_surf->video_buffer = dev->context->pipe->create_video_buffer
(
dev->context->vpipe,
dev->context->pipe,
PIPE_FORMAT_YV12, // most commonly used
ChromaToPipe(chroma_type),
width, height


@@ -303,6 +303,7 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
};
XvMCContextPrivate *context_priv;
struct pipe_context *pipe;
struct pipe_video_context *vpipe;
XvMCSurfacePrivate *surface_priv;
@@ -316,6 +317,7 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
return XvMCBadSurface;
context_priv = context->privData;
pipe = context_priv->vctx->pipe;
vpipe = context_priv->vctx->vpipe;
surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
@@ -326,10 +328,12 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
surface_priv->decode_buffer->set_quant_matrix(surface_priv->decode_buffer, dummy_quant, dummy_quant);
surface_priv->mv_stride = surface_priv->decode_buffer->get_mv_stream_stride(surface_priv->decode_buffer);
surface_priv->video_buffer = vpipe->create_buffer(vpipe, PIPE_FORMAT_NV12,
context_priv->decoder->chroma_format,
context_priv->decoder->width,
context_priv->decoder->height);
surface_priv->video_buffer = pipe->create_video_buffer
(
pipe, PIPE_FORMAT_NV12, context_priv->decoder->chroma_format,
context_priv->decoder->width, context_priv->decoder->height
);
surface_priv->context = context;
surface->surface_id = XAllocID(dpy);