
vaapi: determine surface format in decoder, not in renderer

Until now, we have made the assumption that a driver will use only one
hardware surface format. The format is dictated by the driver (you
don't create surfaces with a specific format - you just pass an
rt_format and get a surface that will be in a specific driver-chosen
format).
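
For illustration, this is roughly what that allocation looks like at
the libva level (a minimal sketch, not code from this commit; the
alloc_surface wrapper is hypothetical): the caller requests a chroma
class via rt_format, and the driver picks the concrete layout.

    #include <va/va.h>

    // You request a chroma class, not a concrete pixel layout; whether
    // the surface ends up as NV12, YV12, etc. is the driver's choice.
    static VASurfaceID alloc_surface(VADisplay display, int w, int h)
    {
        VASurfaceID surface = VA_INVALID_SURFACE;
        VAStatus status = vaCreateSurfaces(display, VA_RT_FORMAT_YUV420,
                                           w, h, &surface, 1, NULL, 0);
        return status == VA_STATUS_SUCCESS ? surface : VA_INVALID_SURFACE;
    }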

In particular, the renderer created a dummy surface to probe the format,
and hoped the decoder would produce the same format. Due to a driver
bug, this required a workaround to make the probed format actually
match what the decoder produced.

Change this so that the format is determined in the decoder. The format
is then passed down as hw_subfmt, which allows the renderer to configure
itself with the correct format. If the hardware surface changes its
format midstream, the renderer can be reconfigured using the normal
mechanisms.
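
Schematically, the renderer's side of this reduces to a check along
these lines (a hypothetical sketch, not literal code from this commit;
current_subfmt and reinit() are stand-ins for the renderer's state and
its reconfiguration path):

    // hw_subfmt carries the driver-chosen fourcc (e.g. NV12) alongside
    // the opaque IMGFMT_VAAPI format; a change triggers a reconfig.
    if (hw_image->params.hw_subfmt != current_subfmt) {
        if (reinit(hw, &hw_image->params) < 0)
            return -1; // surface format not supported by the renderer
        current_subfmt = hw_image->params.hw_subfmt;
    }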

This calls va_surface_init_subformat() each time the decoder returns
a surface. Since libavcodec/AVFrame has no concept of sub-formats,
this is unavoidable. It creates and destroys a derived VAImage, but
this shouldn't have any bad performance effects (at least I didn't
notice any measurable ones).

Note that vaDeriveImage() failures are silently ignored, as some
drivers (the vdpau wrapper) support neither vaDeriveImage nor EGL
interop. In addition, we still probe whether we can map an image
in the EGL interop code. This is important, as it's the only way
to determine whether EGL interop is supported at all. With respect
to the driver bug mentioned above, it doesn't matter which format
the test surface has.
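
That probe boils down to something like the following (a rough sketch
of the test_format() flow shown in the hwdec_vaegl.c hunk below;
try_map() is a hypothetical stand-in for mapping the surface's VAImage
into EGL):

    // Any rt_format works here: only mappability is being tested, not
    // the exact driver-chosen surface format.
    struct mp_image *surface = mp_image_pool_get(alloc, IMGFMT_VAAPI, 64, 64);
    bool ok = surface && reinit(hw, &surface->params) >= 0 &&
              try_map(hw, surface);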

In vf_vavpp, also remove the rt_format guessing business. I think the
existing logic was a bit meaningless anyway. It's not even a given
that vavpp produces the same rt_format for output.
commit f5ff2656e0
parent 49431626cb
Author: wm4
Date:   2016-04-11 20:46:05 +02:00

5 changed files with 72 additions and 48 deletions

video/decode/vaapi.c

@@ -340,6 +340,12 @@ static struct mp_image *allocate_image(struct lavc_ctx *ctx, int w, int h)
     return img;
 }
 
+static struct mp_image *update_format(struct lavc_ctx *ctx, struct mp_image *img)
+{
+    va_surface_init_subformat(img);
+    return img;
+}
+
 static void destroy_va_dummy_ctx(struct priv *p)
 {
     va_destroy(p->ctx);
@@ -497,6 +503,7 @@ const struct vd_lavc_hwdec mp_vd_lavc_vaapi = {
     .allocate_image = allocate_image,
     .lock = intel_shit_lock,
     .unlock = intel_crap_unlock,
+    .process_image = update_format,
 };
 
 const struct vd_lavc_hwdec mp_vd_lavc_vaapi_copy = {

video/filter/vf_vavpp.c

@@ -304,14 +304,6 @@ static int filter_ext(struct vf_instance *vf, struct mp_image *in)
     struct vf_priv_s *p = vf->priv;
 
     if (in) {
-        int rt_format = in->imgfmt == IMGFMT_VAAPI ? va_surface_rt_format(in)
-                                                   : VA_RT_FORMAT_YUV420;
-        if (!p->pool || p->current_rt_format != rt_format) {
-            talloc_free(p->pool);
-            p->pool = mp_image_pool_new(20);
-            va_pool_set_allocator(p->pool, p->va, rt_format);
-            p->current_rt_format = rt_format;
-        }
         if (in->imgfmt != IMGFMT_VAAPI) {
             struct mp_image *tmp = upload(vf, in);
             talloc_free(in);
@@ -350,10 +342,25 @@ static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
                     struct mp_image_params *out)
 {
     struct vf_priv_s *p = vf->priv;
 
-    p->params = *in;
-    *out = *in;
-    out->imgfmt = IMGFMT_VAAPI;
     flush_frames(vf);
+    talloc_free(p->pool);
+    p->pool = NULL;
+
+    p->params = *in;
+    p->current_rt_format = VA_RT_FORMAT_YUV420;
+    p->pool = mp_image_pool_new(20);
+    va_pool_set_allocator(p->pool, p->va, p->current_rt_format);
+
+    struct mp_image *probe = mp_image_pool_get(p->pool, IMGFMT_VAAPI, in->w, in->h);
+    if (!probe)
+        return -1;
+    va_surface_init_subformat(probe);
+
+    *out = *in;
+    out->imgfmt = probe->params.imgfmt;
+    out->hw_subfmt = probe->params.hw_subfmt;
+
+    talloc_free(probe);
     return 0;
 }

video/out/opengl/hwdec_vaegl.c

@@ -172,30 +172,6 @@ static void destroy(struct gl_hwdec *hw)
     va_destroy(p->ctx);
 }
 
-// Create an empty dummy VPP. This works around a weird bug that affects the
-// VA surface format, as it is reported by vaDeriveImage(). Before a VPP
-// context or a decoder context is created, the surface format will be reported
-// as YV12. Surfaces created after context creation will report NV12 (even
-// though surface creation does not take a context as argument!). Existing
-// surfaces will change their format from YV12 to NV12 as soon as the decoder
-// renders to them! Because we want to know the surface format in advance (to
-// simplify our renderer configuration logic), we hope that this hack gives
-// us reasonable behavior.
-// See: https://bugs.freedesktop.org/show_bug.cgi?id=79848
-static void insane_hack(struct gl_hwdec *hw)
-{
-    struct priv *p = hw->priv;
-    VAConfigID config;
-    if (vaCreateConfig(p->display, VAProfileNone, VAEntrypointVideoProc,
-                       NULL, 0, &config) == VA_STATUS_SUCCESS)
-    {
-        // We want to keep this until the VADisplay is destroyed. It will
-        // implicitly free the context.
-        VAContextID context;
-        vaCreateContext(p->display, config, 0, 0, 0, NULL, 0, &context);
-    }
-}
-
 static int create(struct gl_hwdec *hw)
 {
     GL *gl = hw->gl;
@@ -248,7 +224,6 @@ static int create(struct gl_hwdec *hw)
     MP_VERBOSE(p, "using VAAPI EGL interop\n");
 
-    insane_hack(hw);
     if (!test_format(hw)) {
         destroy(hw);
         return -1;
     }
@@ -278,6 +253,18 @@ static int reinit(struct gl_hwdec *hw, struct mp_image_params *params)
     }
     gl->BindTexture(GL_TEXTURE_2D, 0);
 
+    hw->converted_imgfmt = va_fourcc_to_imgfmt(params->hw_subfmt);
+    if (hw->converted_imgfmt != IMGFMT_NV12 &&
+        hw->converted_imgfmt != IMGFMT_420P)
+    {
+        MP_FATAL(p, "unsupported VA image format %s\n",
+                 mp_tag_str(params->hw_subfmt));
+        return -1;
+    }
+
+    MP_VERBOSE(p, "format: %s %s\n", mp_tag_str(params->hw_subfmt),
+               mp_imgfmt_to_name(hw->converted_imgfmt));
+
     return 0;
 }
@@ -308,18 +295,6 @@ static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image,
         goto err;
 
     int mpfmt = va_fourcc_to_imgfmt(va_image->format.fourcc);
-    if (mpfmt != IMGFMT_NV12 && mpfmt != IMGFMT_420P) {
-        MP_FATAL(p, "unsupported VA image format %s\n",
-                 mp_tag_str(va_image->format.fourcc));
-        goto err;
-    }
-
-    if (!hw->converted_imgfmt) {
-        MP_VERBOSE(p, "format: %s %s\n", mp_tag_str(va_image->format.fourcc),
-                   mp_imgfmt_to_name(mpfmt));
-        hw->converted_imgfmt = mpfmt;
-    }
-
     if (hw->converted_imgfmt != mpfmt) {
         MP_FATAL(p, "mid-stream hwdec format change (%s -> %s) not supported\n",
                  mp_imgfmt_to_name(hw->converted_imgfmt), mp_imgfmt_to_name(mpfmt));
@@ -387,6 +362,7 @@ static bool test_format(struct gl_hwdec *hw)
     va_pool_set_allocator(alloc, p->ctx, VA_RT_FORMAT_YUV420);
     struct mp_image *surface = mp_image_pool_get(alloc, IMGFMT_VAAPI, 64, 64);
     if (surface) {
+        va_surface_init_subformat(surface);
         struct mp_image_params params = surface->params;
         if (reinit(hw, &params) >= 0) {
             GLuint textures[4];

video/vaapi.c

@@ -487,6 +487,38 @@ struct mp_image *va_surface_download(struct mp_image *src,
     return NULL;
 }
 
+// Set the hw_subfmt from the surface's real format. Because of this bug:
+// https://bugs.freedesktop.org/show_bug.cgi?id=79848
+// it should be assumed that the real format is only known after an arbitrary
+// vaCreateContext() call has been made, or even better, after the surface
+// has been rendered to.
+// If the hw_subfmt is already set, this is a NOP.
+void va_surface_init_subformat(struct mp_image *mpi)
+{
+    VAStatus status;
+    if (mpi->params.hw_subfmt)
+        return;
+    struct va_surface *p = va_surface_in_mp_image(mpi);
+    if (!p)
+        return;
+
+    VAImage va_image = { .image_id = VA_INVALID_ID };
+
+    va_lock(p->ctx);
+
+    status = vaDeriveImage(p->display, va_surface_id(mpi), &va_image);
+    if (status != VA_STATUS_SUCCESS)
+        goto err;
+
+    mpi->params.hw_subfmt = va_image.format.fourcc;
+
+    status = vaDestroyImage(p->display, va_image.image_id);
+    CHECK_VA_STATUS(p->ctx, "vaDestroyImage()");
+
+err:
+    va_unlock(p->ctx);
+}
+
 struct pool_alloc_ctx {
     struct mp_vaapi_ctx *vaapi;
     int rt_format;

video/vaapi.h

@@ -69,6 +69,8 @@ struct mp_image *va_surface_download(struct mp_image *src,
 int va_surface_alloc_imgfmt(struct mp_image *img, int imgfmt);
 int va_surface_upload(struct mp_image *va_dst, struct mp_image *sw_src);
 
+void va_surface_init_subformat(struct mp_image *mpi);
+
 bool va_guess_if_emulated(struct mp_vaapi_ctx *ctx);
 
 #endif