[g3dvl] and finally remove pipe_video_context

This commit is contained in:
Christian König 2011-07-08 19:22:43 +02:00
parent 4e837f557b
commit ea78480029
28 changed files with 151 additions and 266 deletions

View file

@ -148,9 +148,9 @@ C_SOURCES = \
util/u_resource.c \
util/u_upload_mgr.c \
util/u_vbuf_mgr.c \
vl/vl_context.c \
vl/vl_csc.c \
vl/vl_compositor.c \
vl/vl_decoder.c \
vl/vl_mpeg12_decoder.c \
vl/vl_mpeg12_bitstream.c \
vl/vl_zscan.c \

View file

@ -29,7 +29,7 @@
#define vl_compositor_h
#include <pipe/p_state.h>
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include <pipe/p_video_state.h>
#include "vl_types.h"

View file

@ -25,71 +25,41 @@
*
**************************************************************************/
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include <util/u_memory.h>
#include <util/u_rect.h>
#include <util/u_video.h>
#include "vl_context.h"
#include "vl_compositor.h"
#include "vl_decoder.h"
#include "vl_mpeg12_decoder.h"
static void
vl_context_destroy(struct pipe_video_context *context)
struct pipe_video_decoder *
vl_create_decoder(struct pipe_context *pipe,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height)
{
struct vl_context *ctx = (struct vl_context*)context;
assert(context);
FREE(ctx);
}
static struct pipe_video_decoder *
vl_context_create_decoder(struct pipe_video_context *context,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height)
{
struct vl_context *ctx = (struct vl_context*)context;
unsigned buffer_width, buffer_height;
bool pot_buffers;
assert(context);
assert(pipe);
assert(width > 0 && height > 0);
pot_buffers = !ctx->base.screen->get_video_param(ctx->base.screen, profile, PIPE_VIDEO_CAP_NPOT_TEXTURES);
pot_buffers = !pipe->screen->get_video_param
(
pipe->screen,
profile,
PIPE_VIDEO_CAP_NPOT_TEXTURES
);
buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
switch (u_reduce_video_profile(profile)) {
case PIPE_VIDEO_CODEC_MPEG12:
return vl_create_mpeg12_decoder(context, ctx->pipe, profile, entrypoint,
chroma_format, buffer_width, buffer_height);
return vl_create_mpeg12_decoder(pipe, profile, entrypoint, chroma_format, buffer_width, buffer_height);
default:
return NULL;
}
return NULL;
}
struct pipe_video_context *
vl_create_context(struct pipe_context *pipe)
{
struct vl_context *ctx;
ctx = CALLOC_STRUCT(vl_context);
if (!ctx)
return NULL;
ctx->base.screen = pipe->screen;
ctx->base.destroy = vl_context_destroy;
ctx->base.create_decoder = vl_context_create_decoder;
ctx->pipe = pipe;
return &ctx->base;
}

View file

@ -26,23 +26,19 @@
*
**************************************************************************/
#ifndef vl_context_h
#define vl_context_h
#ifndef vl_decoder_h
#define vl_decoder_h
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
struct pipe_screen;
struct pipe_context;
/**
* standard implementation of pipe->create_video_decoder
*/
struct pipe_video_decoder *
vl_create_decoder(struct pipe_context *pipe,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height);
struct vl_context
{
struct pipe_video_context base;
struct pipe_context *pipe;
};
/* drivers can call this function in their pipe_video_context constructors and pass it
an accelerated pipe_context along with suitable buffering modes, etc */
struct pipe_video_context *
vl_create_context(struct pipe_context *pipe);
#endif /* vl_context_h */
#endif /* vl_decoder_h */

View file

@ -94,7 +94,7 @@ init_zscan_buffer(struct vl_mpeg12_buffer *buffer)
formats[0] = formats[1] = formats[2] = dec->zscan_source_format;
buffer->zscan_source = vl_video_buffer_create_ex
(
dec->pipe,
dec->base.context,
dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
1, PIPE_VIDEO_CHROMA_FORMAT_444, formats, PIPE_USAGE_STATIC
@ -277,7 +277,7 @@ vl_mpeg12_buffer_begin_frame(struct pipe_video_decode_buffer *buffer)
dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
assert(dec);
vl_vb_map(&buf->vertex_stream, dec->pipe);
vl_vb_map(&buf->vertex_stream, dec->base.context);
sampler_views = buf->zscan_source->get_sampler_view_planes(buf->zscan_source);
@ -293,14 +293,14 @@ vl_mpeg12_buffer_begin_frame(struct pipe_video_decode_buffer *buffer)
1
};
buf->tex_transfer[i] = dec->pipe->get_transfer
buf->tex_transfer[i] = dec->base.context->get_transfer
(
dec->pipe, tex,
dec->base.context, tex,
0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
&rect
);
buf->texels[i] = dec->pipe->transfer_map(dec->pipe, buf->tex_transfer[i]);
buf->texels[i] = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer[i]);
}
if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
@ -407,11 +407,11 @@ vl_mpeg12_buffer_end_frame(struct pipe_video_decode_buffer *buffer)
dec = (struct vl_mpeg12_decoder *)buf->base.decoder;
assert(dec);
vl_vb_unmap(&buf->vertex_stream, dec->pipe);
vl_vb_unmap(&buf->vertex_stream, dec->base.context);
for (i = 0; i < VL_MAX_PLANES; ++i) {
dec->pipe->transfer_unmap(dec->pipe, buf->tex_transfer[i]);
dec->pipe->transfer_destroy(dec->pipe, buf->tex_transfer[i]);
dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer[i]);
dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer[i]);
}
}
@ -423,11 +423,11 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
assert(decoder);
/* Asserted in softpipe_delete_fs_state() for some reason */
dec->pipe->bind_vs_state(dec->pipe, NULL);
dec->pipe->bind_fs_state(dec->pipe, NULL);
dec->base.context->bind_vs_state(dec->base.context, NULL);
dec->base.context->bind_fs_state(dec->base.context, NULL);
dec->pipe->delete_depth_stencil_alpha_state(dec->pipe, dec->dsa);
dec->pipe->delete_sampler_state(dec->pipe, dec->sampler_ycbcr);
dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
vl_mc_cleanup(&dec->mc_y);
vl_mc_cleanup(&dec->mc_c);
@ -442,8 +442,8 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
vl_zscan_cleanup(&dec->zscan_y);
vl_zscan_cleanup(&dec->zscan_c);
dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves_ycbcr);
dec->pipe->delete_vertex_elements_state(dec->pipe, dec->ves_mv);
dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
pipe_resource_reference(&dec->quads.buffer, NULL);
pipe_resource_reference(&dec->pos.buffer, NULL);
@ -478,7 +478,7 @@ vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
buffer->base.decode_bitstream = vl_mpeg12_buffer_decode_bitstream;
buffer->base.end_frame = vl_mpeg12_buffer_end_frame;
if (!vl_vb_init(&buffer->vertex_stream, dec->pipe,
if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
dec->base.width / MACROBLOCK_WIDTH,
dec->base.height / MACROBLOCK_HEIGHT))
goto error_vertex_buffer;
@ -545,7 +545,7 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
surfaces = dst->get_surfaces(dst);
dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves_mv);
dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
for (i = 0; i < VL_MAX_PLANES; ++i) {
if (!surfaces[i]) continue;
@ -555,7 +555,7 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
if (!sv[j]) continue;
vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);;
dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
vl_mc_render_ref(&buf->mc[i], sv[j][i]);
}
@ -563,12 +563,12 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
vb[2] = dec->block_num;
dec->pipe->bind_vertex_elements_state(dec->pipe, dec->ves_ycbcr);
dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
for (i = 0; i < VL_MAX_PLANES; ++i) {
if (!num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
vl_zscan_render(&buf->zscan[i] , num_ycbcr_blocks[i]);
@ -585,13 +585,13 @@ vl_mpeg12_decoder_flush_buffer(struct pipe_video_decode_buffer *buffer,
if (!num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
dec->pipe->set_vertex_buffers(dec->pipe, 3, vb);
dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
vl_idct_prepare_stage2(component == 0 ? &dec->idct_y : &dec->idct_c, &buf->idct[component]);
else {
dec->pipe->set_fragment_sampler_views(dec->pipe, 1, &mc_source_sv[component]);
dec->pipe->bind_fragment_sampler_states(dec->pipe, 1, &dec->sampler_ycbcr);
dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
}
vl_mc_render_ycbcr(&buf->mc[i], j, num_ycbcr_blocks[component]);
}
@ -623,8 +623,8 @@ init_pipe_state(struct vl_mpeg12_decoder *dec)
dsa.alpha.enabled = 0;
dsa.alpha.func = PIPE_FUNC_ALWAYS;
dsa.alpha.ref_value = 0;
dec->dsa = dec->pipe->create_depth_stencil_alpha_state(dec->pipe, &dsa);
dec->pipe->bind_depth_stencil_alpha_state(dec->pipe, dec->dsa);
dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
memset(&sampler, 0, sizeof(sampler));
sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
@ -636,7 +636,7 @@ init_pipe_state(struct vl_mpeg12_decoder *dec)
sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
sampler.compare_func = PIPE_FUNC_ALWAYS;
sampler.normalized_coords = 1;
dec->sampler_ycbcr = dec->pipe->create_sampler_state(dec->pipe, &sampler);
dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
if (!dec->sampler_ycbcr)
return false;
@ -651,7 +651,7 @@ find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config con
assert(dec);
screen = dec->pipe->screen;
screen = dec->base.context->screen;
for (i = 0; i < num_configs; ++i) {
if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
@ -685,17 +685,17 @@ init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_con
assert(dec);
dec->zscan_source_format = format_config->zscan_source_format;
dec->zscan_linear = vl_zscan_layout(dec->pipe, vl_zscan_linear, dec->blocks_per_line);
dec->zscan_normal = vl_zscan_layout(dec->pipe, vl_zscan_normal, dec->blocks_per_line);
dec->zscan_alternate = vl_zscan_layout(dec->pipe, vl_zscan_alternate, dec->blocks_per_line);
dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
if (!vl_zscan_init(&dec->zscan_y, dec->pipe, dec->base.width, dec->base.height,
if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
dec->blocks_per_line, dec->num_blocks, num_channels))
return false;
if (!vl_zscan_init(&dec->zscan_c, dec->pipe, dec->chroma_width, dec->chroma_height,
if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
dec->blocks_per_line, dec->num_blocks, num_channels))
return false;
@ -710,8 +710,15 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
struct pipe_sampler_view *matrix = NULL;
nr_of_idct_render_targets = dec->pipe->screen->get_param(dec->pipe->screen, PIPE_CAP_MAX_RENDER_TARGETS);
max_inst = dec->pipe->screen->get_shader_param(dec->pipe->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS);
nr_of_idct_render_targets = dec->base.context->screen->get_param
(
dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
);
max_inst = dec->base.context->screen->get_shader_param
(
dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
);
// Just assume we need 32 inst per render target, not 100% true, but should work in most cases
if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
@ -723,7 +730,7 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
dec->idct_source = vl_video_buffer_create_ex
(
dec->pipe, dec->base.width / 4, dec->base.height, 1,
dec->base.context, dec->base.width / 4, dec->base.height, 1,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
@ -733,7 +740,7 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
dec->mc_source = vl_video_buffer_create_ex
(
dec->pipe, dec->base.width / nr_of_idct_render_targets,
dec->base.context, dec->base.width / nr_of_idct_render_targets,
dec->base.height / 4, nr_of_idct_render_targets,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
@ -741,14 +748,14 @@ init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_conf
if (!dec->mc_source)
goto error_mc_source;
if (!(matrix = vl_idct_upload_matrix(dec->pipe, format_config->idct_scale)))
if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
goto error_matrix;
if (!vl_idct_init(&dec->idct_y, dec->pipe, dec->base.width, dec->base.height,
if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
nr_of_idct_render_targets, matrix, matrix))
goto error_y;
if(!vl_idct_init(&dec->idct_c, dec->pipe, dec->chroma_width, dec->chroma_height,
if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
nr_of_idct_render_targets, matrix, matrix))
goto error_c;
@ -780,7 +787,7 @@ init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_
formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
dec->mc_source = vl_video_buffer_create_ex
(
dec->pipe, dec->base.width, dec->base.height, 1,
dec->base.context, dec->base.width, dec->base.height, 1,
dec->base.chroma_format, formats, PIPE_USAGE_STATIC
);
@ -831,8 +838,7 @@ mc_frag_shader_callback(void *priv, struct vl_mc *mc,
}
struct pipe_video_decoder *
vl_create_mpeg12_decoder(struct pipe_video_context *context,
struct pipe_context *pipe,
vl_create_mpeg12_decoder(struct pipe_context *context,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
@ -860,21 +866,19 @@ vl_create_mpeg12_decoder(struct pipe_video_context *context,
dec->base.create_buffer = vl_mpeg12_create_buffer;
dec->base.flush_buffer = vl_mpeg12_decoder_flush_buffer;
dec->pipe = pipe;
dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
dec->quads = vl_vb_upload_quads(dec->pipe);
dec->quads = vl_vb_upload_quads(dec->base.context);
dec->pos = vl_vb_upload_pos(
dec->pipe,
dec->base.context,
dec->base.width / MACROBLOCK_WIDTH,
dec->base.height / MACROBLOCK_HEIGHT
);
dec->block_num = vl_vb_upload_block_num(dec->pipe, dec->num_blocks);
dec->block_num = vl_vb_upload_block_num(dec->base.context, dec->num_blocks);
dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->pipe);
dec->ves_mv = vl_vb_get_ves_mv(dec->pipe);
dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
/* TODO: Implement 422, 444 */
assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
@ -922,12 +926,14 @@ vl_create_mpeg12_decoder(struct pipe_video_context *context,
goto error_sources;
}
if (!vl_mc_init(&dec->mc_y, dec->pipe, dec->base.width, dec->base.height, MACROBLOCK_HEIGHT, format_config->mc_scale,
if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
MACROBLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_y;
// TODO
if (!vl_mc_init(&dec->mc_c, dec->pipe, dec->base.width, dec->base.height, BLOCK_HEIGHT, format_config->mc_scale,
if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
BLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_c;

View file

@ -28,7 +28,7 @@
#ifndef vl_mpeg12_decoder_h
#define vl_mpeg12_decoder_h
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include "vl_mpeg12_bitstream.h"
#include "vl_zscan.h"
@ -44,7 +44,6 @@ struct pipe_context;
struct vl_mpeg12_decoder
{
struct pipe_video_decoder base;
struct pipe_context *pipe;
unsigned chroma_width, chroma_height;
@ -93,11 +92,11 @@ struct vl_mpeg12_buffer
short *texels[VL_MAX_PLANES];
};
/* drivers can call this function in their pipe_video_context constructors and pass it
an accelerated pipe_context along with suitable buffering modes, etc */
/**
* creates a shader based mpeg12 decoder
*/
struct pipe_video_decoder *
vl_create_mpeg12_decoder(struct pipe_video_context *context,
struct pipe_context *pipe,
vl_create_mpeg12_decoder(struct pipe_context *pipe,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,

View file

@ -29,7 +29,7 @@
#define vl_ycbcr_buffer_h
#include <pipe/p_context.h>
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include "vl_defines.h"

View file

@ -27,6 +27,7 @@
#include "util/u_simple_list.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "r300_cb.h"
@ -438,6 +439,7 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
r300_init_state_functions(r300);
r300_init_resource_functions(r300);
r300->context.create_video_decoder = vl_create_decoder;
r300->context.create_video_buffer = vl_video_buffer_create;
r300->vbuf_mgr = u_vbuf_mgr_create(&r300->context, 1024 * 1024, 16,

View file

@ -25,7 +25,6 @@
#include "util/u_format_s3tc.h"
#include "util/u_memory.h"
#include "os/os_time.h"
#include "vl/vl_context.h"
#include "vl/vl_video_buffer.h"
#include "r300_context.h"
@ -425,14 +424,6 @@ static boolean r300_is_format_supported(struct pipe_screen* screen,
return retval == usage;
}
static struct pipe_video_context *
r300_video_create(struct pipe_screen *screen, struct pipe_context *pipe)
{
assert(screen);
return vl_create_context(pipe);
}
static void r300_destroy_screen(struct pipe_screen* pscreen)
{
struct r300_screen* r300screen = r300_screen(pscreen);
@ -533,7 +524,6 @@ struct pipe_screen* r300_screen_create(struct radeon_winsys *rws)
r300screen->screen.is_format_supported = r300_is_format_supported;
r300screen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
r300screen->screen.context_create = r300_create_context;
r300screen->screen.video_context_create = r300_video_create;
r300screen->screen.fence_reference = r300_fence_reference;
r300screen->screen.fence_signalled = r300_fence_signalled;
r300screen->screen.fence_finish = r300_fence_finish;

View file

@ -38,7 +38,7 @@
#include <util/u_memory.h>
#include <util/u_inlines.h>
#include "util/u_upload_mgr.h"
#include <vl/vl_context.h>
#include <vl/vl_decoder.h>
#include <vl/vl_video_buffer.h>
#include "os/os_time.h"
#include <pipebuffer/pb_buffer.h>
@ -226,6 +226,8 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
rctx->context.draw_vbo = r600_draw_vbo;
rctx->context.create_video_decoder = vl_create_decoder;
rctx->context.create_video_buffer = vl_video_buffer_create;
switch (r600_get_family(rctx->radeon)) {
@ -302,14 +304,6 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
return &rctx->context;
}
static struct pipe_video_context *
r600_video_create(struct pipe_screen *screen, struct pipe_context *pipe)
{
assert(screen && pipe);
return vl_create_context(pipe);
}
/*
* pipe_screen
*/
@ -679,7 +673,6 @@ struct pipe_screen *r600_screen_create(struct radeon *radeon)
rscreen->screen.is_format_supported = r600_is_format_supported;
rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
rscreen->screen.context_create = r600_create_context;
rscreen->screen.video_context_create = r600_video_create;
rscreen->screen.fence_reference = r600_fence_reference;
rscreen->screen.fence_signalled = r600_fence_signalled;
rscreen->screen.fence_finish = r600_fence_finish;

View file

@ -37,6 +37,7 @@
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "tgsi/tgsi_exec.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "sp_clear.h"
#include "sp_context.h"
@ -260,6 +261,7 @@ softpipe_create_context( struct pipe_screen *screen,
softpipe->pipe.render_condition = softpipe_render_condition;
softpipe->pipe.create_video_decoder = vl_create_decoder;
softpipe->pipe.create_video_buffer = vl_video_buffer_create;
/*

View file

@ -33,7 +33,6 @@
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "draw/draw_context.h"
#include "vl/vl_context.h"
#include "vl/vl_video_buffer.h"
#include "state_tracker/sw_winsys.h"
@ -300,14 +299,6 @@ softpipe_flush_frontbuffer(struct pipe_screen *_screen,
winsys->displaytarget_display(winsys, texture->dt, context_private);
}
static struct pipe_video_context *
sp_video_create(struct pipe_screen *screen, struct pipe_context *context)
{
assert(screen);
return vl_create_context(context);
}
/**
* Create a new pipe_screen object
* Note: we're not presently subclassing pipe_screen (no softpipe_screen).
@ -335,7 +326,6 @@ softpipe_create_screen(struct sw_winsys *winsys)
screen->base.is_video_format_supported = vl_video_buffer_is_format_supported;
screen->base.context_create = softpipe_create_context;
screen->base.flush_frontbuffer = softpipe_flush_frontbuffer;
screen->base.video_context_create = sp_video_create;
util_format_s3tc_init();

View file

@ -59,6 +59,8 @@ struct pipe_vertex_buffer;
struct pipe_vertex_element;
struct pipe_viewport_state;
enum pipe_video_profile;
enum pipe_video_entrypoint;
enum pipe_video_chroma_format;
enum pipe_format;
@ -399,6 +401,15 @@ struct pipe_context {
*/
void (*texture_barrier)(struct pipe_context *);
/**
* Creates a video decoder for a specific video codec/profile
*/
struct pipe_video_decoder *(*create_video_decoder)( struct pipe_context *context,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height );
/**
* Creates a video buffer as decoding target
*/

View file

@ -100,9 +100,6 @@ struct pipe_screen {
struct pipe_context * (*context_create)( struct pipe_screen *, void *priv );
struct pipe_video_context * (*video_context_create)( struct pipe_screen *screen,
struct pipe_context *context );
/**
* Check if the given pipe_format is supported as a texture or
* drawing surface.

View file

@ -41,35 +41,11 @@ struct pipe_picture_desc;
struct pipe_fence_handle;
/**
* Gallium video rendering context
*/
struct pipe_video_context
{
struct pipe_screen *screen;
/**
* destroy context, all objects created from this context
* (buffers, decoders, compositors etc...) must be freed before calling this
*/
void (*destroy)(struct pipe_video_context *context);
/**
* create a decoder for a specific video profile
*/
struct pipe_video_decoder *(*create_decoder)(struct pipe_video_context *context,
enum pipe_video_profile profile,
enum pipe_video_entrypoint entrypoint,
enum pipe_video_chroma_format chroma_format,
unsigned width, unsigned height);
};
/**
* decoder for a specific video codec
* Gallium video decoder for a specific codec/profile
*/
struct pipe_video_decoder
{
struct pipe_video_context *context;
struct pipe_context *context;
enum pipe_video_profile profile;
enum pipe_video_entrypoint entrypoint;

View file

@ -25,8 +25,6 @@
*
**************************************************************************/
#include <pipe/p_video_context.h>
#include <util/u_memory.h>
#include <util/u_math.h>
#include <util/u_debug.h>
@ -41,7 +39,7 @@ vlVdpDecoderCreate(VdpDevice device,
VdpDecoder *decoder)
{
enum pipe_video_profile p_profile;
struct pipe_video_context *vpipe;
struct pipe_context *pipe;
vlVdpDevice *dev;
vlVdpDecoder *vldecoder;
VdpStatus ret;
@ -63,7 +61,7 @@ vlVdpDecoderCreate(VdpDevice device,
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
vpipe = dev->context->vpipe;
pipe = dev->context->pipe;
vldecoder = CALLOC(1,sizeof(vlVdpDecoder));
if (!vldecoder)
@ -72,9 +70,9 @@ vlVdpDecoderCreate(VdpDevice device,
vldecoder->device = dev;
// TODO: Define max_references. Used mainly for H264
vldecoder->decoder = vpipe->create_decoder
vldecoder->decoder = pipe->create_video_decoder
(
vpipe, p_profile,
pipe, p_profile,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CHROMA_FORMAT_420,
width, height

View file

@ -26,7 +26,6 @@
**************************************************************************/
#include <pipe/p_compiler.h>
#include <pipe/p_video_context.h>
#include <util/u_memory.h>
#include <util/u_debug.h>

View file

@ -44,7 +44,6 @@ vlVdpVideoMixerCreate(VdpDevice device,
VdpVideoMixer *mixer)
{
vlVdpVideoMixer *vmixer = NULL;
struct pipe_video_context *context;
VdpStatus ret;
float csc[16];
@ -54,8 +53,6 @@ vlVdpVideoMixerCreate(VdpDevice device,
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
context = dev->context->vpipe;
vmixer = CALLOC(1, sizeof(vlVdpVideoMixer));
if (!vmixer)
return VDP_STATUS_RESOURCES;

View file

@ -41,7 +41,6 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
VdpOutputSurface *surface)
{
struct pipe_context *pipe;
struct pipe_video_context *context;
struct pipe_resource res_tmpl, *res;
struct pipe_sampler_view sv_templ;
struct pipe_surface surf_templ;
@ -57,8 +56,7 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
return VDP_STATUS_INVALID_HANDLE;
pipe = dev->context->pipe;
context = dev->context->vpipe;
if (!pipe || !context)
if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
vlsurface = CALLOC(1, sizeof(vlVdpOutputSurface));
@ -76,7 +74,7 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
res_tmpl.usage = PIPE_USAGE_STATIC;
res = context->screen->resource_create(context->screen, &res_tmpl);
res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
if (!res) {
FREE(dev);
return VDP_STATUS_ERROR;

View file

@ -40,7 +40,6 @@ vlVdpPresentationQueueCreate(VdpDevice device,
VdpPresentationQueue *presentation_queue)
{
vlVdpPresentationQueue *pq = NULL;
struct pipe_video_context *context;
VdpStatus ret;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating PresentationQueue\n");
@ -59,8 +58,6 @@ vlVdpPresentationQueueCreate(VdpDevice device,
if (dev != pqt->device)
return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
context = dev->context->vpipe;
pq = CALLOC(1, sizeof(vlVdpPresentationQueue));
if (!pq)
return VDP_STATUS_RESOURCES;
@ -175,9 +172,9 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
vl_compositor_render(&pq->compositor, PIPE_MPEG12_PICTURE_TYPE_FRAME,
drawable_surface, NULL, NULL);
pq->device->context->vpipe->screen->flush_frontbuffer
pq->device->context->pipe->screen->flush_frontbuffer
(
pq->device->context->vpipe->screen,
pq->device->context->pipe->screen,
drawable_surface->texture,
0, 0,
vl_contextprivate_get(pq->device->context, drawable_surface)

View file

@ -28,7 +28,6 @@
#include <assert.h>
#include <pipe/p_video_context.h>
#include <pipe/p_state.h>
#include <util/u_memory.h>
@ -161,7 +160,6 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
{
enum pipe_format pformat = FormatToPipe(source_ycbcr_format);
struct pipe_context *pipe;
struct pipe_video_context *context;
struct pipe_sampler_view **sampler_views;
unsigned i;
@ -173,8 +171,7 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
return VDP_STATUS_INVALID_HANDLE;
pipe = p_surf->device->context->pipe;
context = p_surf->device->context->vpipe;
if (!pipe && !context)
if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
if (p_surf->video_buffer == NULL || pformat != p_surf->video_buffer->buffer_format) {

View file

@ -34,7 +34,7 @@
#include <vdpau/vdpau_x11.h>
#include <pipe/p_compiler.h>
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include <util/u_debug.h>
#include <vl/vl_compositor.h>

View file

@ -31,7 +31,7 @@
#include <X11/extensions/XvMClib.h>
#include <pipe/p_screen.h>
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include <pipe/p_video_state.h>
#include <pipe/p_state.h>
@ -244,13 +244,14 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
return BadAlloc;
}
context_priv->decoder = vctx->vpipe->create_decoder(vctx->vpipe,
ProfileToPipe(mc_type),
(mc_type & XVMC_IDCT) ?
PIPE_VIDEO_ENTRYPOINT_IDCT :
PIPE_VIDEO_ENTRYPOINT_MC,
FormatToPipe(chroma_format),
width, height);
context_priv->decoder = vctx->pipe->create_video_decoder
(
vctx->pipe,
ProfileToPipe(mc_type),
(mc_type & XVMC_IDCT) ? PIPE_VIDEO_ENTRYPOINT_IDCT : PIPE_VIDEO_ENTRYPOINT_MC,
FormatToPipe(chroma_format),
width, height
);
if (!context_priv->decoder) {
XVMC_MSG(XVMC_ERR, "[XvMC] Could not create VL decoder.\n");

View file

@ -32,7 +32,7 @@
#include <xorg/fourcc.h>
#include <pipe/p_screen.h>
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include <pipe/p_state.h>
#include <util/u_memory.h>
@ -224,7 +224,6 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
XvMCContextPrivate *context_priv;
XvMCSubpicturePrivate *subpicture_priv;
struct pipe_context *pipe;
struct pipe_video_context *vpipe;
struct pipe_resource tex_templ, *tex;
struct pipe_sampler_view sampler_templ;
Status ret;
@ -238,7 +237,6 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
context_priv = context->privData;
pipe = context_priv->vctx->pipe;
vpipe = context_priv->vctx->vpipe;
if (!subpicture)
return XvMCBadSubpicture;
@ -259,9 +257,9 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
tex_templ.target = PIPE_TEXTURE_2D;
tex_templ.format = XvIDToPipe(xvimage_id);
tex_templ.last_level = 0;
if (vpipe->screen->get_video_param(vpipe->screen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
if (pipe->screen->get_video_param(pipe->screen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
tex_templ.width0 = width;
tex_templ.height0 = height;
}
@ -275,7 +273,7 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
tex_templ.flags = 0;
tex = vpipe->screen->resource_create(vpipe->screen, &tex_templ);
tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
memset(&sampler_templ, 0, sizeof(sampler_templ));
u_sampler_view_default_template(&sampler_templ, tex, tex->format);
@ -305,7 +303,7 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
tex_templ.height0 = 1;
tex_templ.usage = PIPE_USAGE_STATIC;
tex = vpipe->screen->resource_create(vpipe->screen, &tex_templ);
tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
memset(&sampler_templ, 0, sizeof(sampler_templ));
u_sampler_view_default_template(&sampler_templ, tex, tex->format);

View file

@ -30,7 +30,7 @@
#include <X11/Xlibint.h>
#include <pipe/p_video_context.h>
#include <pipe/p_video_decoder.h>
#include <pipe/p_video_state.h>
#include <pipe/p_state.h>
@@ -304,7 +304,6 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
XvMCContextPrivate *context_priv;
struct pipe_context *pipe;
struct pipe_video_context *vpipe;
XvMCSurfacePrivate *surface_priv;
XVMC_MSG(XVMC_TRACE, "[XvMC] Creating surface %p.\n", surface);
@@ -318,7 +317,6 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
context_priv = context->privData;
pipe = context_priv->vctx->pipe;
vpipe = context_priv->vctx->vpipe;
surface_priv = CALLOC(1, sizeof(XvMCSurfacePrivate));
if (!surface_priv)
@@ -357,10 +355,8 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
XvMCMacroBlockArray *macroblocks, XvMCBlockArray *blocks
)
{
struct pipe_video_context *vpipe;
struct pipe_video_decode_buffer *t_buffer;
XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
XvMCSurfacePrivate *past_surface_priv;
XvMCSurfacePrivate *future_surface_priv;
@@ -406,9 +402,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
assert(!past_surface || past_surface_priv->context == context);
assert(!future_surface || future_surface_priv->context == context);
context_priv = context->privData;
vpipe = context_priv->vctx->vpipe;
t_buffer = target_surface_priv->decode_buffer;
// enshure that all reference frames are flushed
@@ -496,7 +489,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
{
static int dump_window = -1;
struct pipe_video_context *vpipe;
struct pipe_context *pipe;
struct vl_compositor *compositor;
XvMCSurfacePrivate *surface_priv;
@@ -522,7 +515,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
assert(srcy + srch - 1 < surface->height);
subpicture_priv = surface_priv->subpicture ? surface_priv->subpicture->privData : NULL;
vpipe = context_priv->vctx->vpipe;
pipe = context_priv->vctx->pipe;
compositor = &context_priv->compositor;
if (!context_priv->drawable_surface ||
@@ -571,15 +564,15 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
}
// Workaround for r600g, there seems to be a bug in the fence refcounting code
vpipe->screen->fence_reference(vpipe->screen, &surface_priv->fence, NULL);
pipe->screen->fence_reference(pipe->screen, &surface_priv->fence, NULL);
vl_compositor_render(compositor, PictureToPipe(flags), context_priv->drawable_surface, &dst_rect, &surface_priv->fence);
XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for display. Pushing to front buffer.\n", surface);
vpipe->screen->flush_frontbuffer
pipe->screen->flush_frontbuffer
(
vpipe->screen,
pipe->screen,
context_priv->drawable_surface->texture,
0, 0,
vl_contextprivate_get(context_priv->vctx, context_priv->drawable_surface)
@@ -606,7 +599,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
PUBLIC
Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
{
struct pipe_video_context *vpipe;
struct pipe_context *pipe;
XvMCSurfacePrivate *surface_priv;
XvMCContextPrivate *context_priv;
@@ -619,12 +612,12 @@ Status XvMCGetSurfaceStatus(Display *dpy, XvMCSurface *surface, int *status)
surface_priv = surface->privData;
context_priv = surface_priv->context->privData;
vpipe = context_priv->vctx->vpipe;
pipe = context_priv->vctx->pipe;
*status = 0;
if (surface_priv->fence)
if (!vpipe->screen->fence_signalled(vpipe->screen, surface_priv->fence))
if (!pipe->screen->fence_signalled(pipe->screen, surface_priv->fence))
*status |= XVMC_RENDERING;
return Success;

View file

@@ -27,11 +27,13 @@
#include <vl_winsys.h>
#include <driclient.h>
#include <pipe/p_video_context.h>
#include <pipe/p_screen.h>
#include <pipe/p_context.h>
#include <pipe/p_state.h>
#include <util/u_memory.h>
#include <util/u_hash.h>
#include <util/u_hash_table.h>
#include <util/u_inlines.h>
#include <state_tracker/drm_driver.h>
#include <X11/Xlibint.h>
@@ -237,13 +239,6 @@ vl_video_create(struct vl_screen *vscreen)
struct vl_dri_screen *vl_dri_scrn = (struct vl_dri_screen*)vscreen;
struct vl_dri_context *vl_dri_ctx;
if (!vscreen->pscreen->video_context_create) {
debug_printf("[G3DVL] No video support found on %s/%s.\n",
vscreen->pscreen->get_vendor(vscreen->pscreen),
vscreen->pscreen->get_name(vscreen->pscreen));
goto no_vpipe;
}
vl_dri_ctx = CALLOC_STRUCT(vl_dri_context);
if (!vl_dri_ctx)
goto no_struct;
@@ -256,11 +251,6 @@ vl_video_create(struct vl_screen *vscreen)
goto no_pipe;
}
vl_dri_ctx->base.vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, vl_dri_ctx->base.pipe);
if (!vl_dri_ctx->base.vpipe)
goto no_pipe;
vl_dri_ctx->base.vscreen = vscreen;
vl_dri_ctx->fd = vl_dri_scrn->dri_screen->fd;
@@ -270,7 +260,6 @@ no_pipe:
FREE(vl_dri_ctx);
no_struct:
no_vpipe:
return NULL;
}
@@ -281,6 +270,5 @@ void vl_video_destroy(struct vl_context *vctx)
assert(vctx);
vl_dri_ctx->base.pipe->destroy(vl_dri_ctx->base.pipe);
vl_dri_ctx->base.vpipe->destroy(vl_dri_ctx->base.vpipe);
FREE(vl_dri_ctx);
}

View file

@@ -33,7 +33,6 @@
#include <pipe/p_format.h>
struct pipe_screen;
struct pipe_video_context;
struct pipe_surface;
struct vl_screen
@@ -45,7 +44,6 @@ struct vl_context
{
struct vl_screen *vscreen;
struct pipe_context *pipe;
struct pipe_video_context *vpipe;
};
struct vl_screen*

View file

@@ -28,7 +28,6 @@
#include <X11/Xlibint.h>
#include <pipe/p_state.h>
#include <pipe/p_video_context.h>
#include <util/u_memory.h>
#include <util/u_format.h>
@@ -173,30 +172,21 @@ struct vl_context*
vl_video_create(struct vl_screen *vscreen)
{
struct pipe_context *pipe;
struct pipe_video_context *vpipe;
struct vl_context *vctx;
assert(vscreen);
assert(vscreen->pscreen->video_context_create);
pipe = vscreen->pscreen->context_create(vscreen->pscreen, NULL);
if (!pipe)
return NULL;
vpipe = vscreen->pscreen->video_context_create(vscreen->pscreen, pipe);
if (!vpipe) {
pipe->destroy(pipe);
return NULL;
}
vctx = CALLOC_STRUCT(vl_context);
if (!vctx) {
pipe->destroy(pipe);
vpipe->destroy(vpipe);
return NULL;
}
vctx->vpipe = vpipe;
vctx->pipe = pipe;
vctx->vscreen = vscreen;
return vctx;
@@ -207,6 +197,5 @@ void vl_video_destroy(struct vl_context *vctx)
assert(vctx);
vctx->pipe->destroy(vctx->pipe);
vctx->vpipe->destroy(vctx->vpipe);
FREE(vctx);
}