frontends/va: Add support for decode/encode processing

This implements support for Decode processing, allowing a processing
operation to be performed on the decoded picture in a single call without
having to use a separate processing context.

This also implements the same functionality for encoding, which is
useful for performing conversion from RGB to YUV in a single call, and it
allows us to properly support the conversion inside the encoder (e.g. EFC
on AMD).
For Encode processing the additional output buffer is required, the same
as with Decode processing, but the driver may not use it to perform the
conversion (in cases where the conversion can be done by the encoder hardware).
This means the contents of the additional buffer are undefined, and the
application should not rely on the buffer actually containing the output
picture of the conversion.

Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36755>
This commit is contained in:
David Rosca 2025-01-24 15:44:43 +01:00 committed by Marge Bot
parent b0a5d78247
commit efc6d27fd4
6 changed files with 106 additions and 25 deletions

View file

@ -261,6 +261,9 @@ vlVaGetConfigAttributes(VADriverContextP ctx, VAProfile profile, VAEntrypoint en
}
} break;
#endif
case VAConfigAttribDecProcessing:
value = 1;
break;
default:
value = VA_ATTRIB_NOT_SUPPORTED;
break;

View file

@ -162,6 +162,7 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
context->slice_data_offset = 0;
context->have_slice_params = false;
context->proc.dst_surface = NULL;
mtx_unlock(&drv->mutex);
return VA_STATUS_SUCCESS;
@ -1124,11 +1125,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
vlVaBuffer *coded_buf;
vlVaSurface *surf;
void *feedback = NULL;
struct pipe_screen *screen;
bool apply_av1_fg = false;
struct pipe_video_buffer **out_target;
int output_id;
enum pipe_format target_format;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
@ -1185,15 +1184,11 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
context->mpeg4.frame_num++;
screen = context->decoder->context->screen;
if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
target_format = context->target->buffer_format;
if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
coded_buf = context->coded_buf;
context->desc.base.out_fence = &coded_buf->fence;
@ -1217,6 +1212,19 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
context->desc.av1enc.requested_metadata = driver_metadata_support;
context->desc.base.in_fence = surf->fence;
if (context->proc.dst_surface) {
if (!context->decoder->process_frame ||
context->decoder->process_frame(context->decoder, context->target, &context->proc.vpp) != 0) {
VAStatus ret = vlVaPostProcCompositor(drv, context->target, context->proc.vpp.dst,
VL_COMPOSITOR_NONE, &context->proc.vpp);
vlVaSurfaceFlush(drv, context->proc.dst_surface);
if (ret != VA_STATUS_SUCCESS) {
mtx_unlock(&drv->mutex);
return ret;
}
}
context->target = context->proc.vpp.dst;
}
context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
context->decoder->encode_bitstream(context->decoder, context->target,
coded_buf->derived_surface.resource, &feedback);
@ -1229,12 +1237,10 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
context->desc.base.out_fence = &surf->fence;
}
if (screen->is_video_target_buffer_supported &&
!screen->is_video_target_buffer_supported(screen,
target_format,
context->target,
context->decoder->profile,
context->decoder->entrypoint)) {
if (!drv->pipe->screen->is_video_format_supported(drv->pipe->screen,
context->target->buffer_format,
context->decoder->profile,
context->decoder->entrypoint)) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
@ -1254,7 +1260,21 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME))
context->decoder->flush(context->decoder);
if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
if (context->proc.dst_surface) {
if (!context->decoder->process_frame ||
context->decoder->process_frame(context->decoder, context->target, &context->proc.vpp) != 0) {
VAStatus ret = vlVaPostProcCompositor(drv, context->target, context->proc.vpp.dst,
VL_COMPOSITOR_NONE, &context->proc.vpp);
vlVaSurfaceFlush(drv, context->proc.dst_surface);
if (ret != VA_STATUS_SUCCESS) {
mtx_unlock(&drv->mutex);
return ret;
}
}
}
} else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
switch (u_reduce_video_profile(context->templat.profile)) {
case PIPE_VIDEO_FORMAT_AV1:
context->desc.av1enc.frame_num++;

View file

@ -265,7 +265,7 @@ vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *contex
VARectangle def_src_region, def_dst_region;
const VARectangle *src_region, *dst_region;
VAProcPipelineParameterBuffer *param;
struct pipe_video_buffer *src;
struct pipe_video_buffer *src, *dst;
vlVaSurface *src_surface, *dst_surface;
unsigned i;
struct pipe_vpp_desc vpp = {0};
@ -285,15 +285,23 @@ vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *contex
dst_surface = handle_table_get(drv->htab, context->target_id);
if (!src_surface || !dst_surface)
return VA_STATUS_ERROR_INVALID_SURFACE;
vlVaGetSurfaceBuffer(drv, src_surface);
vlVaGetSurfaceBuffer(drv, dst_surface);
if (!src_surface->buffer || !dst_surface->buffer)
return VA_STATUS_ERROR_INVALID_SURFACE;
/* Encode/Decode processing */
if (context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE ||
context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
if (param->num_additional_outputs < 1 || !param->additional_outputs)
return VA_STATUS_ERROR_INVALID_PARAMETER;
dst_surface = handle_table_get(drv->htab, param->additional_outputs[0]);
}
src_region = vlVaRegionDefault(param->surface_region, src_surface, &def_src_region);
dst_region = vlVaRegionDefault(param->output_region, dst_surface, &def_dst_region);
src = src_surface->buffer;
src = vlVaGetSurfaceBuffer(drv, src_surface);
dst = vlVaGetSurfaceBuffer(drv, dst_surface);
if (!src || !dst)
return VA_STATUS_ERROR_INVALID_SURFACE;
for (i = 0; i < param->num_filters; i++) {
vlVaBuffer *buf = handle_table_get(drv->htab, param->filters[i]);
@ -462,7 +470,7 @@ vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *contex
vpp.out_color_range = PIPE_VIDEO_VPP_CHROMA_COLOR_RANGE_FULL;
break;
default:
vpp.out_color_range = util_format_is_yuv(context->target->buffer_format) ?
vpp.out_color_range = util_format_is_yuv(dst->buffer_format) ?
PIPE_VIDEO_VPP_CHROMA_COLOR_RANGE_REDUCED : PIPE_VIDEO_VPP_CHROMA_COLOR_RANGE_FULL;
break;
}
@ -485,11 +493,22 @@ vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *contex
vpp.base.in_fence = src_surface->fence;
if (vlVaVidEngineBlit(drv, context, src, context->target, deinterlace, &vpp) == VA_STATUS_SUCCESS)
/* Encode/Decode processing */
if (context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE ||
context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
vpp.dst = dst;
vpp.base.out_fence = &dst_surface->fence;
context->proc.vpp = vpp;
context->proc.dst_surface = dst_surface;
vlVaSetSurfaceContext(drv, dst_surface, context);
return VA_STATUS_SUCCESS;
}
if (vlVaVidEngineBlit(drv, context, src, dst, deinterlace, &vpp) == VA_STATUS_SUCCESS)
return VA_STATUS_SUCCESS;
VAStatus ret =
vlVaPostProcCompositor(drv, src, context->target, deinterlace, &vpp);
vlVaPostProcCompositor(drv, src, dst, deinterlace, &vpp);
vlVaSurfaceFlush(drv, dst_surface);
return ret;
}

View file

@ -1317,9 +1317,11 @@ no_res:
}
VAStatus
vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context,
vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context_id,
VAProcFilterType *filters, unsigned int *num_filters)
{
vlVaDriver *drv = VL_VA_DRIVER(ctx);
vlVaContext *context;
unsigned int num = 0;
if (!ctx)
@ -1328,7 +1330,18 @@ vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context,
if (!num_filters || !filters)
return VA_STATUS_ERROR_INVALID_PARAMETER;
filters[num++] = VAProcFilterDeinterlacing;
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE &&
context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
filters[num++] = VAProcFilterDeinterlacing;
mtx_unlock(&drv->mutex);
*num_filters = num;
@ -1336,11 +1349,14 @@ vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context,
}
VAStatus
vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context,
vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context_id,
VAProcFilterType type, void *filter_caps,
unsigned int *num_filter_caps)
{
vlVaDriver *drv = VL_VA_DRIVER(ctx);
vlVaContext *context;
unsigned int i;
bool supports_filters;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
@ -1348,6 +1364,18 @@ vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context,
if (!filter_caps || !num_filter_caps)
return VA_STATUS_ERROR_INVALID_PARAMETER;
mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
mtx_unlock(&drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
supports_filters = context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE &&
context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
mtx_unlock(&drv->mutex);
i = 0;
switch (type) {
@ -1356,6 +1384,9 @@ vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context,
case VAProcFilterDeinterlacing: {
VAProcFilterCapDeinterlacing *deint = filter_caps;
if (!supports_filters)
return VA_STATUS_ERROR_UNIMPLEMENTED;
if (*num_filter_caps < 3) {
*num_filter_caps = 3;
return VA_STATUS_ERROR_MAX_NUM_EXCEEDED;

View file

@ -404,6 +404,11 @@ typedef struct vlVaContext {
struct pipe_av1_enc_picture_desc av1enc;
} desc;
struct {
struct pipe_vpp_desc vpp;
struct vlVaSurface *dst_surface;
} proc;
struct {
unsigned long long int frame_num;
unsigned int start_code_size;

View file

@ -2156,6 +2156,9 @@ struct pipe_vpp_desc
enum pipe_video_vpp_orientation orientation;
struct pipe_vpp_blend blend;
/* Only used for encode/decode processing */
struct pipe_video_buffer *dst;
uint32_t background_color;
enum pipe_video_vpp_color_standard_type in_colors_standard;
enum pipe_video_vpp_color_range in_color_range;