st/va: add colorspace conversion through Video Post Processing

Add support for VPP in the following functions:
vlVaCreateContext
vlVaDestroyContext
vlVaBeginPicture
vlVaRenderPicture
vlVaEndPicture

Add support for VAProcFilterNone in:
vlVaQueryVideoProcFilters
vlVaQueryVideoProcFilterCaps
vlVaQueryVideoProcPipelineCaps

Add handleVAProcPipelineParameterBufferType helper.

One application is:
VASurfaceNV12 -> gstvaapipostproc -> VASurfaceRGBA

Signed-off-by: Julien Isorce <j.isorce@samsung.com>
Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
This commit is contained in:
Julien Isorce 2015-10-30 11:42:48 +00:00 committed by Christian König
parent 05b6ce4209
commit 0b868807e4
4 changed files with 260 additions and 53 deletions

View file

@ -87,6 +87,14 @@ static struct VADriverVTable vtable =
&vlVaQuerySurfaceAttributes
};
/* Video post-processing entry points handed to libva via ctx->vtable_vpp.
 * The leading 1 is the VADriverVTableVPP version field; the three function
 * pointers implement the filter/pipeline capability queries below. */
static struct VADriverVTableVPP vtable_vpp =
{
1,
&vlVaQueryVideoProcFilters,
&vlVaQueryVideoProcFilterCaps,
&vlVaQueryVideoProcPipelineCaps
};
PUBLIC VAStatus
VA_DRIVER_INIT_FUNC(VADriverContextP ctx)
{
@ -122,6 +130,7 @@ VA_DRIVER_INIT_FUNC(VADriverContextP ctx)
ctx->version_major = 0;
ctx->version_minor = 1;
*ctx->vtable = vtable;
*ctx->vtable_vpp = vtable_vpp;
ctx->max_profiles = PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH - PIPE_VIDEO_PROFILE_UNKNOWN;
ctx->max_entrypoints = 1;
ctx->max_attributes = 1;
@ -151,11 +160,15 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
struct pipe_video_codec templat = {};
vlVaDriver *drv;
vlVaContext *context;
int is_vpp;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!(picture_width && picture_height))
is_vpp = config_id == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
!picture_height && !flag && !render_targets && !num_render_targets;
if (!(picture_width && picture_height) && !is_vpp)
return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
drv = VL_VA_DRIVER(ctx);
@ -163,52 +176,60 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
if (!context)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
templat.profile = config_id;
templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
templat.width = picture_width;
templat.height = picture_height;
templat.max_references = num_render_targets;
templat.expect_chunked_decode = true;
if (is_vpp) {
context->decoder = NULL;
if (!drv->compositor.upload) {
FREE(context);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
} else {
templat.profile = config_id;
templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
templat.width = picture_width;
templat.height = picture_height;
templat.max_references = num_render_targets;
templat.expect_chunked_decode = true;
if (u_reduce_video_profile(templat.profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC)
templat.level = u_get_h264_level(templat.width, templat.height,
&templat.max_references);
if (u_reduce_video_profile(templat.profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC)
templat.level = u_get_h264_level(templat.width, templat.height,
&templat.max_references);
context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
if (!context->decoder) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
if (!context->decoder) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
if (u_reduce_video_profile(context->decoder->profile) ==
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC) {
context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
if (!context->desc.h264.pps) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
if (!context->desc.h264.pps) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
if (!context->desc.h264.pps->sps) {
FREE(context->desc.h264.pps);
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
}
context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
if (!context->desc.h264.pps->sps) {
FREE(context->desc.h264.pps);
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
}
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_HEVC) {
context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
if (!context->desc.h265.pps) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
context->desc.h265.pps->sps = CALLOC_STRUCT(pipe_h265_sps);
if (!context->desc.h265.pps->sps) {
FREE(context->desc.h265.pps);
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_HEVC) {
context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
if (!context->desc.h265.pps) {
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
context->desc.h265.pps->sps = CALLOC_STRUCT(pipe_h265_sps);
if (!context->desc.h265.pps->sps) {
FREE(context->desc.h265.pps);
FREE(context);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
}
}
@ -229,17 +250,20 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
drv = VL_VA_DRIVER(ctx);
context = handle_table_get(drv->htab, context_id);
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC) {
FREE(context->desc.h264.pps->sps);
FREE(context->desc.h264.pps);
if (context->decoder) {
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC) {
FREE(context->desc.h264.pps->sps);
FREE(context->desc.h264.pps);
}
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_HEVC) {
FREE(context->desc.h265.pps->sps);
FREE(context->desc.h265.pps);
}
context->decoder->destroy(context->decoder);
}
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_HEVC) {
FREE(context->desc.h265.pps->sps);
FREE(context->desc.h265.pps);
}
context->decoder->destroy(context->decoder);
FREE(context);
handle_table_remove(drv->htab, context_id);

View file

@ -32,6 +32,7 @@
#include "util/u_video.h"
#include "vl/vl_vlc.h"
#include "vl/vl_winsys.h"
#include "va_private.h"
@ -58,6 +59,16 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
return VA_STATUS_ERROR_INVALID_SURFACE;
context->target = surf->buffer;
if (!context->decoder) {
/* VPP */
if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM) ||
context->target->interlaced)
return VA_STATUS_ERROR_UNIMPLEMENTED;
return VA_STATUS_SUCCESS;
}
context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
return VA_STATUS_SUCCESS;
@ -703,11 +714,71 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
num_buffers, (const void * const*)buffers, sizes);
}
/**
 * Handle a VAProcPipelineParameterBufferType buffer: blit the source
 * surface into the context's current render target (context->target)
 * through the compositor, performing any colorspace conversion/scaling
 * implied by the two buffer formats.
 *
 * \param drv      driver instance (provides compositor state and pipe)
 * \param context  VPP context whose target was set in vlVaBeginPicture
 * \param buf      buffer whose data is a VAProcPipelineParameterBuffer
 * \return VA_STATUS_SUCCESS or a VA error code on invalid arguments.
 */
static VAStatus
handleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   struct u_rect src_rect;
   struct u_rect dst_rect;
   struct u_rect *dirty_area;
   vlVaSurface *src_surface;
   VAProcPipelineParameterBuffer *pipeline_param;
   struct pipe_surface **surfaces;
   struct pipe_screen *screen;
   struct pipe_surface *psurf;

   if (!drv || !context)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!buf || !buf->data)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   if (!context->target)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   pipeline_param = (VAProcPipelineParameterBuffer *)buf->data;

   src_surface = handle_table_get(drv->htab, pipeline_param->surface);
   if (!src_surface || !src_surface->buffer)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   surfaces = context->target->get_surfaces(context->target);
   if (!surfaces || !surfaces[0])
      return VA_STATUS_ERROR_INVALID_SURFACE;

   screen = drv->pipe->screen;
   psurf = surfaces[0];

   /* Per va_vpp.h, surface_region/output_region may be NULL, meaning the
    * whole surface.  The original code dereferenced them unconditionally,
    * crashing on such (valid) input; default to the full extents here. */
   if (pipeline_param->surface_region) {
      src_rect.x0 = pipeline_param->surface_region->x;
      src_rect.y0 = pipeline_param->surface_region->y;
      src_rect.x1 = pipeline_param->surface_region->x + pipeline_param->surface_region->width;
      src_rect.y1 = pipeline_param->surface_region->y + pipeline_param->surface_region->height;
   } else {
      src_rect.x0 = 0;
      src_rect.y0 = 0;
      src_rect.x1 = src_surface->buffer->width;
      src_rect.y1 = src_surface->buffer->height;
   }

   if (pipeline_param->output_region) {
      dst_rect.x0 = pipeline_param->output_region->x;
      dst_rect.y0 = pipeline_param->output_region->y;
      dst_rect.x1 = pipeline_param->output_region->x + pipeline_param->output_region->width;
      dst_rect.y1 = pipeline_param->output_region->y + pipeline_param->output_region->height;
   } else {
      dst_rect.x0 = 0;
      dst_rect.y0 = 0;
      dst_rect.x1 = context->target->width;
      dst_rect.y1 = context->target->height;
   }

   dirty_area = vl_screen_get_dirty_area(drv->vscreen);

   /* Single-layer weave blit: src_surface->buffer -> psurf. */
   vl_compositor_clear_layers(&drv->cstate);
   vl_compositor_set_buffer_layer(&drv->cstate, &drv->compositor, 0, src_surface->buffer, &src_rect, NULL, VL_COMPOSITOR_WEAVE);
   vl_compositor_set_layer_dst_area(&drv->cstate, 0, &dst_rect);
   vl_compositor_render(&drv->cstate, &drv->compositor, psurf, dirty_area, true);

   /* Drop the old fence and install one covering this blit so that
    * synchronization on the source surface waits for the conversion. */
   screen->fence_reference(screen, &src_surface->fence, NULL);
   drv->pipe->flush(drv->pipe, &src_surface->fence, 0);

   return VA_STATUS_SUCCESS;
}
VAStatus
vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
{
vlVaDriver *drv;
vlVaContext *context;
VAStatus vaStatus = VA_STATUS_SUCCESS;
unsigned i;
@ -743,13 +814,16 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
case VASliceDataBufferType:
handleVASliceDataBufferType(context, buf);
break;
case VAProcPipelineParameterBufferType:
vaStatus = handleVAProcPipelineParameterBufferType(drv, context, buf);
break;
default:
break;
}
}
return VA_STATUS_SUCCESS;
return vaStatus;
}
VAStatus
@ -769,6 +843,11 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
if (!context)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!context->decoder) {
/* VPP */
return VA_STATUS_SUCCESS;
}
context->mpeg4.frame_num++;
context->decoder->end_frame(context->decoder, context->target, &context->desc.base);

View file

@ -348,7 +348,8 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config,
i = 0;
if (config == PIPE_VIDEO_PROFILE_UNKNOWN) {
/* Assume VAEntrypointVideoProc for now. */
/* vlVaCreateConfig returns PIPE_VIDEO_PROFILE_UNKNOWN
only for VAEntrypointVideoProc. */
attribs[i].type = VASurfaceAttribPixelFormat;
attribs[i].value.type = VAGenericValueTypeInteger;
attribs[i].flags = VA_SURFACE_ATTRIB_GETTABLE | VA_SURFACE_ATTRIB_SETTABLE;
@ -634,3 +635,98 @@ no_res:
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
/**
 * Report the video-processing filters supported by this driver.
 *
 * Only VAProcFilterNone is advertised (a plain blit/convert with no
 * additional filtering).  On success, *num_filters holds the number of
 * entries written into the caller-supplied filters array.
 */
VAStatus
vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context,
                          VAProcFilterType *filters, unsigned int *num_filters)
{
   if (ctx == NULL)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (filters == NULL || num_filters == NULL)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   /* Exactly one supported filter. */
   filters[0] = VAProcFilterNone;
   *num_filters = 1;

   return VA_STATUS_SUCCESS;
}
/**
 * Report the capabilities of a given video-processing filter type.
 *
 * VAProcFilterNone has no capability entries, so *num_filter_caps is set
 * to 0.  Known-but-unsupported filters return VA_STATUS_ERROR_UNIMPLEMENTED.
 *
 * Fix: the original used `default: assert(0);` with no return — under
 * NDEBUG the assert is a no-op and an unrecognized filter type fell
 * through to VA_STATUS_SUCCESS.  Now an explicit error is returned.
 */
VAStatus
vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context,
                             VAProcFilterType type, void *filter_caps,
                             unsigned int *num_filter_caps)
{
   unsigned int i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!filter_caps || !num_filter_caps)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   i = 0;

   switch (type) {
   case VAProcFilterNone:
      /* No capability entries for the pass-through filter. */
      break;
   case VAProcFilterNoiseReduction:
   case VAProcFilterDeinterlacing:
   case VAProcFilterSharpening:
   case VAProcFilterColorBalance:
   case VAProcFilterSkinToneEnhancement:
      return VA_STATUS_ERROR_UNIMPLEMENTED;
   default:
      assert(0);
      return VA_STATUS_ERROR_INVALID_PARAMETER;
   }

   *num_filter_caps = i;

   return VA_STATUS_SUCCESS;
}
/* Color standards accepted on VPP input surfaces; only BT.601 for now.
 * Sized VAProcColorStandardCount so more entries can be added later. */
static VAProcColorStandardType vpp_input_color_standards[VAProcColorStandardCount] = {
VAProcColorStandardBT601
};

/* Color standards produced on VPP output surfaces; only BT.601 for now. */
static VAProcColorStandardType vpp_output_color_standards[VAProcColorStandardCount] = {
VAProcColorStandardBT601
};
/**
 * Describe the video-processing pipeline: no extra flags, no reference
 * frames, and BT.601 as the only input/output color standard.  Every
 * filter buffer handle supplied by the application is validated.
 */
VAStatus
vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context,
                               VABufferID *filters, unsigned int num_filters,
                               VAProcPipelineCaps *pipeline_cap)
{
   unsigned int idx;

   if (ctx == NULL)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   /* A filter array is required whenever a non-zero count is given. */
   if (pipeline_cap == NULL || (num_filters && filters == NULL))
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   pipeline_cap->pipeline_flags = 0;
   pipeline_cap->filter_flags = 0;
   pipeline_cap->num_forward_references = 0;
   pipeline_cap->num_backward_references = 0;

   pipeline_cap->input_color_standards = vpp_input_color_standards;
   pipeline_cap->num_input_color_standards = 1;
   pipeline_cap->output_color_standards = vpp_output_color_standards;
   pipeline_cap->num_output_color_standards = 1;

   /* Reject any filter handle that does not resolve to a valid buffer. */
   for (idx = 0; idx < num_filters; idx++) {
      vlVaBuffer *buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, filters[idx]);

      if (buf == NULL || buf->type >= VABufferTypeMax)
         return VA_STATUS_ERROR_INVALID_BUFFER;
   }

   return VA_STATUS_SUCCESS;
}

View file

@ -33,6 +33,7 @@
#include <va/va.h>
#include <va/va_backend.h>
#include <va/va_backend_vpp.h>
#include "pipe/p_video_enums.h"
#include "pipe/p_video_codec.h"
@ -318,4 +319,11 @@ VAStatus vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format, unsigned
unsigned int num_attribs);
VAStatus vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config, VASurfaceAttrib *attrib_list,
unsigned int *num_attribs);
VAStatus vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context, VAProcFilterType *filters,
unsigned int *num_filters);
VAStatus vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context, VAProcFilterType type,
void *filter_caps, unsigned int *num_filter_caps);
VAStatus vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context, VABufferID *filters,
unsigned int num_filters, VAProcPipelineCaps *pipeline_cap);
#endif //VA_PRIVATE_H