frontends/va: Implement vaSyncBuffer

Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18715>

commit 229c6f79a6
parent 84c0529258
7 changed files with 152 additions and 43 deletions
@@ -397,3 +397,79 @@ vlVaReleaseBufferHandle(VADriverContextP ctx, VABufferID buf_id)
    return VA_STATUS_SUCCESS;
 }
 
+VAStatus
+vlVaSyncBuffer(VADriverContextP ctx, VABufferID buf_id, uint64_t timeout_ns)
+{
+   vlVaDriver *drv;
+   vlVaContext *context;
+   vlVaBuffer *buf;
+
+   if (!ctx)
+      return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+   drv = VL_VA_DRIVER(ctx);
+   if (!drv)
+      return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+   /* Some apps like ffmpeg check for vaSyncBuffer to be present
+      to do async enqueuing of multiple vaEndPicture encode calls
+      before calling vaSyncBuffer with a pre-defined latency
+      If vaSyncBuffer is not implemented, they fallback to the
+      usual synchronous pairs of { vaEndPicture + vaSyncSurface }
+
+      As this might require the driver to support multiple
+      operations and/or store multiple feedback values before sync
+      fallback to backward compatible behaviour unless driver
+      explicitly supports PIPE_VIDEO_CAP_ENC_SUPPORTS_ASYNC_OPERATION
+   */
+   if (!drv->pipe->screen->get_video_param(drv->pipe->screen,
+                                           PIPE_VIDEO_PROFILE_UNKNOWN,
+                                           PIPE_VIDEO_ENTRYPOINT_ENCODE,
+                                           PIPE_VIDEO_CAP_ENC_SUPPORTS_ASYNC_OPERATION))
+      return VA_STATUS_ERROR_UNIMPLEMENTED;
+
+   /* vaSyncBuffer spec states that "If timeout is zero, the function returns immediately." */
+   if (timeout_ns == 0)
+      return VA_STATUS_ERROR_TIMEDOUT;
+
+   if (timeout_ns != VA_TIMEOUT_INFINITE)
+      return VA_STATUS_ERROR_UNIMPLEMENTED;
+
+   mtx_lock(&drv->mutex);
+   buf = handle_table_get(drv->htab, buf_id);
+
+   if (!buf) {
+      mtx_unlock(&drv->mutex);
+      return VA_STATUS_ERROR_INVALID_BUFFER;
+   }
+
+   if (!buf->feedback) {
+      /* No outstanding operation: nothing to do. */
+      mtx_unlock(&drv->mutex);
+      return VA_STATUS_SUCCESS;
+   }
+
+   context = handle_table_get(drv->htab, buf->ctx);
+   if (!context) {
+      mtx_unlock(&drv->mutex);
+      return VA_STATUS_ERROR_INVALID_CONTEXT;
+   }
+
+   vlVaSurface* surf = handle_table_get(drv->htab, buf->associated_encode_input_surf);
+
+   if ((buf->feedback) && (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)) {
+      context->decoder->get_feedback(context->decoder, buf->feedback, &(buf->coded_size));
+      buf->feedback = NULL;
+      /* Also mark the associated render target (encode source texture) surface as done
+         in case they call vaSyncSurface on it to avoid getting the feedback twice*/
+      if(surf)
+      {
+         surf->feedback = NULL;
+         buf->associated_encode_input_surf = VA_INVALID_ID;
+      }
+   }
+
+   mtx_unlock(&drv->mutex);
+   return VA_STATUS_SUCCESS;
+}
+
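For reference, the pattern the in-code comment above describes looks roughly like this on the application side. This is a hedged sketch, not part of the commit: the helper name encode_frame_and_wait and the pre-existing display/context/buffer setup are assumptions; only the libva entry points (vaBeginPicture, vaEndPicture, vaSyncBuffer, vaSyncSurface) and the VA_TIMEOUT_INFINITE / VA_STATUS_ERROR_UNIMPLEMENTED values are real API.

#include <va/va.h>

/* Hypothetical helper: encode one frame and wait for its coded buffer.
 * Falls back to vaSyncSurface() when the driver does not implement
 * vaSyncBuffer(), mirroring the fallback described in the comment above. */
static VAStatus
encode_frame_and_wait(VADisplay dpy, VAContextID ctx,
                      VASurfaceID src, VABufferID coded_buf)
{
   VAStatus st = vaBeginPicture(dpy, ctx, src);
   if (st != VA_STATUS_SUCCESS)
      return st;

   /* ... vaRenderPicture() calls with the parameter buffers go here ... */

   st = vaEndPicture(dpy, ctx);
   if (st != VA_STATUS_SUCCESS)
      return st;

   /* Block until this frame's bitstream is ready. With the async cap,
    * several frames could be submitted before syncing the oldest one. */
   st = vaSyncBuffer(dpy, coded_buf, VA_TIMEOUT_INFINITE);
   if (st == VA_STATUS_ERROR_UNIMPLEMENTED)
      st = vaSyncSurface(dpy, src); /* older driver: sync on the source surface */
   return st;
}
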
@@ -99,6 +99,10 @@ static struct VADriverVTable vtable =
    NULL, /* vaQueryProcessingRate */
    &vlVaExportSurfaceHandle,
 #endif
+#if VA_CHECK_VERSION(1, 15, 0)
+   NULL, /* vaSyncSurface2 */
+   &vlVaSyncBuffer,
+#endif
 };
 
 static struct VADriverVTableVPP vtable_vpp =
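The new vtable slots only exist when Mesa is built against libva headers that define them, hence the VA_CHECK_VERSION(1, 15, 0) guard above. An application can lean on the same macro to keep compiling against older headers; the snippet below is illustrative only (the helper name is made up, the guard simply mirrors the one used in this hunk, and any libva that passes it also declares vaSyncBuffer).

#include <va/va.h>

/* Illustrative only (not part of the commit): guard the vaSyncBuffer() call
 * at compile time and keep the vaSyncSurface() path for older libva headers. */
static VAStatus
sync_encoded_frame(VADisplay dpy, VASurfaceID src, VABufferID coded_buf)
{
#if VA_CHECK_VERSION(1, 15, 0)
   (void)src;
   return vaSyncBuffer(dpy, coded_buf, VA_TIMEOUT_INFINITE);
#else
   (void)coded_buf;
   return vaSyncSurface(dpy, src);
#endif
}
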
@@ -91,7 +91,6 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
                                            PIPE_VIDEO_ENTRYPOINT_PROCESSING,
                                            PIPE_VIDEO_CAP_SUPPORTED)) {
       context->needs_begin_frame = true;
-      context->vpp_needs_flush_on_endpic = true;
    }
 
    return VA_STATUS_SUCCESS;
@@ -790,7 +789,7 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
    vlVaContext *context;
    vlVaBuffer *coded_buf;
    vlVaSurface *surf;
-   void *feedback;
+   void *feedback = NULL;
    struct pipe_screen *screen;
    bool supported;
    bool realloc = false;
@@ -930,40 +929,46 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
       context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
       context->decoder->encode_bitstream(context->decoder, context->target,
                                          coded_buf->derived_surface.resource, &feedback);
+      coded_buf->feedback = feedback;
+      coded_buf->ctx = context_id;
       surf->feedback = feedback;
       surf->coded_buf = coded_buf;
+      coded_buf->associated_encode_input_surf = context->target_id;
    }
 
    context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
-   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
-      u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
-      int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
-      int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
-      surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
-      surf->force_flushed = false;
-      if (context->first_single_submitted) {
-         context->decoder->flush(context->decoder);
-         context->first_single_submitted = false;
-         surf->force_flushed = true;
-      }
-      if (p_remain_in_idr == 1) {
-         if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
-            context->decoder->flush(context->decoder);
-            context->first_single_submitted = true;
-         }
-         else
-            context->first_single_submitted = false;
-         surf->force_flushed = true;
-      }
-      if (!context->desc.h264enc.not_referenced)
-         context->desc.h264enc.frame_num++;
-   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
-             u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
-      context->desc.h265enc.frame_num++;
-   else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
-            context->vpp_needs_flush_on_endpic) {
+
+   if (drv->pipe->screen->get_video_param(drv->pipe->screen,
+                                          context->decoder->profile,
+                                          context->decoder->entrypoint,
+                                          PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME))
       context->decoder->flush(context->decoder);
-      context->vpp_needs_flush_on_endpic = false;
+   else {
+      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
+         u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
+         int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
+         int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
+         surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
+         surf->force_flushed = false;
+         if (context->first_single_submitted) {
+            context->decoder->flush(context->decoder);
+            context->first_single_submitted = false;
+            surf->force_flushed = true;
+         }
+         if (p_remain_in_idr == 1) {
+            if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
+               context->decoder->flush(context->decoder);
+               context->first_single_submitted = true;
+            }
+            else
+               context->first_single_submitted = false;
+            surf->force_flushed = true;
+         }
+         if (!context->desc.h264enc.not_referenced)
+            context->desc.h264enc.frame_num++;
+      } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
+                u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
+         context->desc.h265enc.frame_num++;
    }
 
    mtx_unlock(&drv->mutex);
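Condensed, the sequence that vlVaEndPicture and vlVaSyncBuffer now drive through the gallium interface looks like the sketch below. It is illustrative only: the helper name and parameter set are assumptions, while the pipe_video_codec hooks and their argument order are taken from the calls visible in this diff.

#include "pipe/p_video_codec.h"

/* Illustrative sketch of one encoded frame, assuming the driver reports
 * PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME (flush right after end_frame)
 * and PIPE_VIDEO_CAP_ENC_SUPPORTS_ASYNC_OPERATION (get_feedback may be
 * deferred while more frames are enqueued). */
static unsigned
encode_one_frame(struct pipe_video_codec *codec,
                 struct pipe_video_buffer *target,
                 struct pipe_picture_desc *picture,
                 struct pipe_resource *coded_res)
{
   void *feedback = NULL;
   unsigned coded_size = 0;

   codec->begin_frame(codec, target, picture);
   codec->encode_bitstream(codec, target, coded_res, &feedback);
   codec->end_frame(codec, target, picture);
   codec->flush(codec);          /* kicks off the work in the device */

   /* With the async cap, more frames could be submitted here before
    * synchronizing; get_feedback() is what finally waits for this one. */
   codec->get_feedback(codec, feedback, &coded_size);
   return coded_size;
}
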
@@ -174,7 +174,6 @@ static VAStatus vlVaVidEngineBlit(vlVaDriver *drv, vlVaContext *context,
       context->needs_begin_frame = false;
    }
    context->decoder->process_frame(context->decoder, src, &context->desc.vidproc);
-   context->vpp_needs_flush_on_endpic = true;
 
    return VA_STATUS_SUCCESS;
 }
@@ -123,21 +123,28 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
    }
 
    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
-      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
-         int frame_diff;
-         if (context->desc.h264enc.frame_num_cnt >= surf->frame_num_cnt)
-            frame_diff = context->desc.h264enc.frame_num_cnt - surf->frame_num_cnt;
-         else
-            frame_diff = 0xFFFFFFFF - surf->frame_num_cnt + 1 + context->desc.h264enc.frame_num_cnt;
-         if ((frame_diff == 0) &&
-             (surf->force_flushed == false) &&
-             (context->desc.h264enc.frame_num_cnt % 2 != 0)) {
-            context->decoder->flush(context->decoder);
-            context->first_single_submitted = true;
+      if (!drv->pipe->screen->get_video_param(drv->pipe->screen,
+                                              context->decoder->profile,
+                                              context->decoder->entrypoint,
+                                              PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME)) {
+         if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
+            int frame_diff;
+            if (context->desc.h264enc.frame_num_cnt >= surf->frame_num_cnt)
+               frame_diff = context->desc.h264enc.frame_num_cnt - surf->frame_num_cnt;
+            else
+               frame_diff = 0xFFFFFFFF - surf->frame_num_cnt + 1 + context->desc.h264enc.frame_num_cnt;
+            if ((frame_diff == 0) &&
+                (surf->force_flushed == false) &&
+                (context->desc.h264enc.frame_num_cnt % 2 != 0)) {
+               context->decoder->flush(context->decoder);
+               context->first_single_submitted = true;
+            }
          }
       }
       context->decoder->get_feedback(context->decoder, surf->feedback, &(surf->coded_buf->coded_size));
       surf->feedback = NULL;
+      surf->coded_buf->feedback = NULL;
+      surf->coded_buf->associated_encode_input_surf = VA_INVALID_ID;
    }
    mtx_unlock(&drv->mutex);
    return VA_STATUS_SUCCESS;
@@ -286,6 +286,9 @@ typedef struct {
    VABufferInfo export_state;
    unsigned int coded_size;
    struct pipe_video_buffer *derived_image_buffer;
+   void *feedback;
+   VASurfaceID associated_encode_input_surf;
+   VAContextID ctx;
 } vlVaBuffer;
 
 typedef struct {
@@ -328,7 +331,6 @@ typedef struct {
    bool first_single_submitted;
    int gop_coeff;
    bool needs_begin_frame;
-   bool vpp_needs_flush_on_endpic;
    void *blit_cs;
    int packed_header_type;
 } vlVaContext;
@@ -457,6 +459,7 @@ VAStatus vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context,
                                       void *filter_caps, unsigned int *num_filter_caps);
 VAStatus vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context, VABufferID *filters,
                                         unsigned int num_filters, VAProcPipelineCaps *pipeline_cap);
+VAStatus vlVaSyncBuffer(VADriverContextP ctx, VABufferID buf_id, uint64_t timeout_ns);
 
 // internal functions
 VAStatus vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);
@@ -114,6 +114,21 @@ enum pipe_video_cap
    PIPE_VIDEO_CAP_ENC_HEVC_BLOCK_SIZES = 29,
    PIPE_VIDEO_CAP_ENC_HEVC_FEATURE_FLAGS = 30,
    PIPE_VIDEO_CAP_ENC_HEVC_PREDICTION_DIRECTION = 31,
+   /*
+      If reported by the driver, then pipe_video_codec.flush(...)
+      needs to be called after pipe_video_codec.end_frame(...)
+      to kick off the work in the device
+   */
+   PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME = 32,
+
+   /*
+      If reported by the driver, then multiple p_video_codec encode
+      operations can be asynchronously enqueued (and also flushed)
+      with different feedback values in the device before get_feedback
+      is called on them to synchronize. The device can block on begin_frame
+      when it has reached its maximum async depth capacity
+   */
+   PIPE_VIDEO_CAP_ENC_SUPPORTS_ASYNC_OPERATION = 33,
 };
 
 /* To be used with PIPE_VIDEO_CAP_VPP_ORIENTATION_MODES and for VPP state*/
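On the driver side, the two new caps are reported through the pipe_screen get_video_param hook, the same query the frontend hunks above issue. The sketch below is hypothetical: the function name and return policy are assumptions, only the enums and the hook signature come from Mesa.

#include "pipe/p_screen.h"
#include "pipe/p_video_enums.h"

/* Hypothetical example of a gallium driver advertising the new caps. */
static int
example_get_video_param(struct pipe_screen *screen,
                        enum pipe_video_profile profile,
                        enum pipe_video_entrypoint entrypoint,
                        enum pipe_video_cap param)
{
   (void)screen;
   (void)profile;

   switch (param) {
   case PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME:
      /* Ask the frontend to call flush() right after end_frame(). */
      return 1;
   case PIPE_VIDEO_CAP_ENC_SUPPORTS_ASYNC_OPERATION:
      /* Several encode jobs may be in flight before get_feedback(). */
      return entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE;
   default:
      return 0; /* everything else left out of this sketch */
   }
}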