FFmpeg/libavutil/hwcontext_qsv.c
Wenbin Chen 7e7b3a4c28 libavutil/hwcontext_qsv: Align width and heigh when download qsv frame
The width and height of a qsv frame to be downloaded need to be
aligned to 16. Add the alignment operation.
Now the following command works:
ffmpeg -hwaccel qsv -f rawvideo -s 1920x1080 -pix_fmt yuv420p -i \
input.yuv -vf "hwupload=extra_hw_frames=16,format=qsv,hwdownload, \
format=nv12" -f null -

Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
Signed-off-by: Haihao Xiang <haihao.xiang@intel.com>
2022-04-13 13:41:27 +08:00

1682 lines
54 KiB
C

/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <mfx/mfxvideo.h>
#include "config.h"
#if HAVE_PTHREADS
#include <pthread.h>
#endif
#define COBJMACROS
#if CONFIG_VAAPI
#include "hwcontext_vaapi.h"
#endif
#if CONFIG_D3D11VA
#include "hwcontext_d3d11va.h"
#endif
#if CONFIG_DXVA2
#include "hwcontext_dxva2.h"
#endif
#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
#include "hwcontext_internal.h"
#include "hwcontext_qsv.h"
#include "mem.h"
#include "pixfmt.h"
#include "pixdesc.h"
#include "time.h"
#include "imgutils.h"
/* True when the libmfx headers we are building against are at least version
 * MAJOR.MINOR. Fully parenthesized so the ||/&& mix does not rely on
 * operator precedence (silences -Wparentheses-style warnings). */
#define QSV_VERSION_ATLEAST(MAJOR, MINOR)   \
    ((MFX_VERSION_MAJOR > (MAJOR)) ||       \
     (MFX_VERSION_MAJOR == (MAJOR) && MFX_VERSION_MINOR >= (MINOR)))

/* Extract the MFX_IMPL_VIA_* bits (bits 8-11) of an mfxIMPL value; they
 * identify which child device API (VAAPI/D3D9/D3D11) the session uses. */
#define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
/* Private state for a QSV device that was derived from / created on top of a
 * child device (VAAPI, D3D11, DXVA2): keeps the child device context alive. */
typedef struct QSVDevicePriv {
    AVBufferRef *child_device_ctx;
} QSVDevicePriv;
/* Internal per-device state filled in by qsv_device_init(). */
typedef struct QSVDeviceContext {
    mfxHDL              handle;       // child device handle from the session (may be NULL)
    mfxHandleType       handle_type;  // type of 'handle' (VA display / D3D device / ...)
    mfxVersion          ver;          // implementation version reported by the session
    mfxIMPL             impl;         // implementation flags reported by the session
    enum AVHWDeviceType child_device_type; // hw device type matching 'handle'
    enum AVPixelFormat  child_pix_fmt;     // hw pixel format of the child device type
} QSVDeviceContext;
/* Internal per-frames-context state. The two internal VPP sessions are
 * created lazily (and guarded by session_lock where pthreads is available)
 * for downloading/uploading surfaces to/from system memory. */
typedef struct QSVFramesContext {
    mfxSession session_download;      // VPP session used for video->system copies
    atomic_int session_download_init; // set once session_download init was attempted
    mfxSession session_upload;        // VPP session used for system->video copies
    atomic_int session_upload_init;   // set once session_upload init was attempted
#if HAVE_PTHREADS
    pthread_mutex_t session_lock;     // serializes lazy session initialization
#endif

    AVBufferRef *child_frames_ref;    // frames context on the child device backing our surfaces
    mfxFrameSurface1 *surfaces_internal; // surfaces owned by the internal pool
    mfxHDLPair *handle_pairs_internal;   // one MemId handle pair per internal surface
    int nb_surfaces_used;             // surfaces handed out so far by qsv_pool_alloc()

    // used in the frame allocator for non-opaque surfaces
    mfxMemId *mem_ids;

    // used in the opaque alloc request for opaque surfaces
    mfxFrameSurface1 **surface_ptrs;

    mfxExtOpaqueSurfaceAlloc opaque_alloc;
    mfxExtBuffer *ext_buffers[1];

    AVFrame realigned_upload_frame;   // scratch frame for 16-aligned uploads
    AVFrame realigned_download_frame; // scratch frame for 16-aligned downloads
} QSVFramesContext;
/* Mapping between the software pixel formats this hwcontext can handle and
 * the corresponding libmfx FourCC codes; qsv_fourcc_from_pix_fmt() searches
 * this table. */
static const struct {
    enum AVPixelFormat pix_fmt;
    uint32_t           fourcc;
} supported_pixel_formats[] = {
    { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
    { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 },
    { AV_PIX_FMT_P010, MFX_FOURCC_P010 },
    { AV_PIX_FMT_PAL8, MFX_FOURCC_P8   },
#if CONFIG_VAAPI
    // packed 4:2:2 formats are only wired up for the VAAPI child device
    { AV_PIX_FMT_YUYV422,
                       MFX_FOURCC_YUY2 },
#if QSV_VERSION_ATLEAST(1, 27)
    { AV_PIX_FMT_Y210,
                       MFX_FOURCC_Y210 },
#endif
#endif
};
// Forward declaration so the definition below gets a visible prototype.
extern int ff_qsv_get_surface_base_handle(mfxFrameSurface1 *surf,
                                          enum AVHWDeviceType base_dev_type,
                                          void **base_handle);

/**
 * Extract the child-device handle(s) stored in a QSV surface's MemId
 * (an mfxHDLPair).
 *
 * Caller needs to allocate enough space for base_handle pointer:
 * one entry for VAAPI/DXVA2, two entries (texture + index) for D3D11VA.
 * Returns 0 on success, AVERROR(EINVAL) for unsupported device types.
 **/
int ff_qsv_get_surface_base_handle(mfxFrameSurface1 *surf,
                                   enum AVHWDeviceType base_dev_type,
                                   void **base_handle)
{
    mfxHDLPair *handle_pair;
    handle_pair = surf->Data.MemId;
    switch (base_dev_type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
        base_handle[0] = handle_pair->first;  // VASurfaceID*
        return 0;
#endif
#if CONFIG_D3D11VA
    case AV_HWDEVICE_TYPE_D3D11VA:
        base_handle[0] = handle_pair->first;  // ID3D11Texture2D*
        base_handle[1] = handle_pair->second; // texture array index
        return 0;
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
        base_handle[0] = handle_pair->first;  // IDirect3DSurface9*
        return 0;
#endif
    }
    return AVERROR(EINVAL);
}
/* Translate an AVPixelFormat into the matching libmfx FourCC code.
 * Returns 0 when the format is not in the support table. */
static uint32_t qsv_fourcc_from_pix_fmt(enum AVPixelFormat pix_fmt)
{
    int idx;

    for (idx = 0; idx < FF_ARRAY_ELEMS(supported_pixel_formats); idx++)
        if (pix_fmt == supported_pixel_formats[idx].pix_fmt)
            return supported_pixel_formats[idx].fourcc;

    return 0;
}
#if CONFIG_D3D11VA
/* Derive D3D11 texture bind flags from libmfx memory-type flags.
 * Note the second check intentionally overrides the first: any VPP output /
 * processor-target surface must be a render target regardless of the
 * decoder/encoder flags computed above. */
static uint32_t qsv_get_d3d11va_bind_flags(int mem_type)
{
    uint32_t bind_flags = 0;

    if ((mem_type & MFX_MEMTYPE_VIDEO_MEMORY_ENCODER_TARGET) && (mem_type & MFX_MEMTYPE_INTERNAL_FRAME))
        bind_flags = D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
    else
        bind_flags = D3D11_BIND_DECODER;

    if ((MFX_MEMTYPE_FROM_VPPOUT & mem_type) || (MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET & mem_type))
        bind_flags = D3D11_BIND_RENDER_TARGET;

    return bind_flags;
}
#endif
/* Fill the padding area of dst (whose dimensions are src's rounded up to the
 * alignment libmfx requires) by replicating the rightmost column and bottom
 * row of src's content, so hardware never reads uninitialized pixels.
 * dst must already contain src's pixel data; formats must match. */
static int qsv_fill_border(AVFrame *dst, const AVFrame *src)
{
    const AVPixFmtDescriptor *desc;
    int i, planes_nb = 0;
    if (dst->format != src->format)
        return AVERROR(EINVAL);
    desc = av_pix_fmt_desc_get(dst->format);

    /* number of planes = highest plane index used by any component + 1 */
    for (i = 0; i < desc->nb_components; i++)
        planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

    for (i = 0; i < planes_nb; i++) {
        int sheight, dheight, y;
        ptrdiff_t swidth = av_image_get_linesize(src->format,
                                                 src->width,
                                                 i);
        ptrdiff_t dwidth = av_image_get_linesize(dst->format,
                                                 dst->width,
                                                 i);
        const AVComponentDescriptor comp = desc->comp[i];
        if (swidth < 0 || dwidth < 0) {
            av_log(NULL, AV_LOG_ERROR, "av_image_get_linesize failed\n");
            return AVERROR(EINVAL);
        }
        sheight = src->height;
        dheight = dst->height;
        if (i) {
            /* chroma planes are vertically subsampled */
            sheight = AV_CEIL_RSHIFT(src->height, desc->log2_chroma_h);
            dheight = AV_CEIL_RSHIFT(dst->height, desc->log2_chroma_h);
        }

        // fill right padding by repeating the last sample of each row
        // (2-byte element for >8-bit formats, 1 byte otherwise)
        for (y = 0; y < sheight; y++) {
            void *line_ptr = dst->data[i] + y*dst->linesize[i] + swidth;
            av_memcpy_backptr(line_ptr,
                              comp.depth > 8 ? 2 : 1,
                              dwidth - swidth);
        }

        // fill bottom padding by repeating the last valid (already padded) row
        for (y = sheight; y < dheight; y++) {
            memcpy(dst->data[i]+y*dst->linesize[i],
                   dst->data[i]+(sheight-1)*dst->linesize[i],
                   dwidth);
        }
    }
    return 0;
}
/* Device-init hook: query the user-supplied session for its implementation
 * and version, then try to fetch the underlying child device handle (VA
 * display / D3D11 device / D3D9 device manager) so child contexts can be
 * derived later. A missing handle is not fatal, only logged. */
static int qsv_device_init(AVHWDeviceContext *ctx)
{
    AVQSVDeviceContext *hwctx = ctx->hwctx;
    QSVDeviceContext *s = ctx->internal->priv;
    int hw_handle_supported = 0;
    mfxHandleType handle_type;
    enum AVHWDeviceType device_type;
    enum AVPixelFormat pix_fmt;
    mfxStatus err;

    err = MFXQueryIMPL(hwctx->session, &s->impl);
    if (err == MFX_ERR_NONE)
        err = MFXQueryVersion(hwctx->session, &s->ver);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
        return AVERROR_UNKNOWN;
    }

    /* map the session's VIA bits to a child device type, if that child
     * device API was compiled in */
    if (MFX_IMPL_VIA_VAAPI == MFX_IMPL_VIA_MASK(s->impl)) {
#if CONFIG_VAAPI
        handle_type = MFX_HANDLE_VA_DISPLAY;
        device_type = AV_HWDEVICE_TYPE_VAAPI;
        pix_fmt = AV_PIX_FMT_VAAPI;
        hw_handle_supported = 1;
#endif
    } else if (MFX_IMPL_VIA_D3D11 == MFX_IMPL_VIA_MASK(s->impl)) {
#if CONFIG_D3D11VA
        handle_type = MFX_HANDLE_D3D11_DEVICE;
        device_type = AV_HWDEVICE_TYPE_D3D11VA;
        pix_fmt = AV_PIX_FMT_D3D11;
        hw_handle_supported = 1;
#endif
    } else if (MFX_IMPL_VIA_D3D9 == MFX_IMPL_VIA_MASK(s->impl)) {
#if CONFIG_DXVA2
        handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
        device_type = AV_HWDEVICE_TYPE_DXVA2;
        pix_fmt = AV_PIX_FMT_DXVA2_VLD;
        hw_handle_supported = 1;
#endif
    }

    if (hw_handle_supported) {
        err = MFXVideoCORE_GetHandle(hwctx->session, handle_type, &s->handle);
        if (err == MFX_ERR_NONE) {
            s->handle_type = handle_type;
            s->child_device_type = device_type;
            s->child_pix_fmt = pix_fmt;
        }
    }
    if (!s->handle) {
        av_log(ctx, AV_LOG_VERBOSE, "No supported hw handle could be retrieved "
               "from the session\n");
    }
    return 0;
}
/* Frames-uninit hook: tear down the lazily created upload/download VPP
 * sessions, the session lock, and all internally allocated surface state. */
static void qsv_frames_uninit(AVHWFramesContext *ctx)
{
    QSVFramesContext *s = ctx->internal->priv;

    if (s->session_download) {
        MFXVideoVPP_Close(s->session_download);
        MFXClose(s->session_download);
    }
    s->session_download = NULL;
    s->session_download_init = 0;

    if (s->session_upload) {
        MFXVideoVPP_Close(s->session_upload);
        MFXClose(s->session_upload);
    }
    s->session_upload = NULL;
    s->session_upload_init = 0;

#if HAVE_PTHREADS
    pthread_mutex_destroy(&s->session_lock);
#endif

    av_freep(&s->mem_ids);
    av_freep(&s->surface_ptrs);
    av_freep(&s->surfaces_internal);
    av_freep(&s->handle_pairs_internal);
    av_frame_unref(&s->realigned_upload_frame);
    av_frame_unref(&s->realigned_download_frame);
    av_buffer_unref(&s->child_frames_ref);
}
/* Pool buffers point into the surfaces_internal array owned by the frames
 * context, so releasing an individual buffer is intentionally a no-op. */
static void qsv_pool_release_dummy(void *opaque, uint8_t *data)
{
}
/* Buffer-pool allocator: hand out the next unused surface from the
 * preallocated surfaces_internal array, wrapped in a non-owning AVBufferRef.
 * Returns NULL once all nb_surfaces have been handed out. */
static AVBufferRef *qsv_pool_alloc(void *opaque, size_t size)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
    QSVFramesContext *s = ctx->internal->priv;
    AVQSVFramesContext *hwctx = ctx->hwctx;

    if (s->nb_surfaces_used < hwctx->nb_surfaces) {
        s->nb_surfaces_used++;
        return av_buffer_create((uint8_t*)(s->surfaces_internal + s->nb_surfaces_used - 1),
                                sizeof(*hwctx->surfaces), qsv_pool_release_dummy, NULL, 0);
    }
    return NULL;
}
/* Create a frames context on the child device (VAAPI/D3D11/DXVA2) that backs
 * our non-opaque QSV surfaces, and wire each child surface into
 * surfaces_internal[i].Data.MemId as an mfxHDLPair. Also sets/extends
 * hwctx->frame_type to match the child surface kind. Requires a child device
 * handle obtained in qsv_device_init(). */
static int qsv_init_child_ctx(AVHWFramesContext *ctx)
{
    AVQSVFramesContext *hwctx = ctx->hwctx;
    QSVFramesContext *s = ctx->internal->priv;
    QSVDeviceContext *device_priv = ctx->device_ctx->internal->priv;

    AVBufferRef *child_device_ref = NULL;
    AVBufferRef *child_frames_ref = NULL;

    AVHWDeviceContext *child_device_ctx;
    AVHWFramesContext *child_frames_ctx;

    int i, ret = 0;

    if (!device_priv->handle) {
        av_log(ctx, AV_LOG_ERROR,
               "Cannot create a non-opaque internal surface pool without "
               "a hardware handle\n");
        return AVERROR(EINVAL);
    }

    child_device_ref = av_hwdevice_ctx_alloc(device_priv->child_device_type);
    if (!child_device_ref)
        return AVERROR(ENOMEM);
    child_device_ctx = (AVHWDeviceContext*)child_device_ref->data;

    /* hand the native handle we extracted from the session to the child
     * device context instead of creating a new device */
#if CONFIG_VAAPI
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
        AVVAAPIDeviceContext *child_device_hwctx = child_device_ctx->hwctx;
        child_device_hwctx->display = (VADisplay)device_priv->handle;
    }
#endif
#if CONFIG_D3D11VA
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
        AVD3D11VADeviceContext *child_device_hwctx = child_device_ctx->hwctx;
        // the child context takes its own reference on the device
        ID3D11Device_AddRef((ID3D11Device*)device_priv->handle);
        child_device_hwctx->device = (ID3D11Device*)device_priv->handle;
    }
#endif
#if CONFIG_DXVA2
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
        AVDXVA2DeviceContext *child_device_hwctx = child_device_ctx->hwctx;
        child_device_hwctx->devmgr = (IDirect3DDeviceManager9*)device_priv->handle;
    }
#endif

    ret = av_hwdevice_ctx_init(child_device_ref);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing a child device context\n");
        goto fail;
    }

    child_frames_ref = av_hwframe_ctx_alloc(child_device_ref);
    if (!child_frames_ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    child_frames_ctx = (AVHWFramesContext*)child_frames_ref->data;

    child_frames_ctx->format            = device_priv->child_pix_fmt;
    child_frames_ctx->sw_format         = ctx->sw_format;
    child_frames_ctx->initial_pool_size = ctx->initial_pool_size;
    // libmfx requires 16-aligned dimensions; crop info carries the real size
    child_frames_ctx->width             = FFALIGN(ctx->width, 16);
    child_frames_ctx->height            = FFALIGN(ctx->height, 16);

#if CONFIG_D3D11VA
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
        AVD3D11VAFramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
        if (hwctx->frame_type == 0)
            hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
        if (hwctx->frame_type & MFX_MEMTYPE_SHARED_RESOURCE)
            child_frames_hwctx->MiscFlags = D3D11_RESOURCE_MISC_SHARED;
        child_frames_hwctx->BindFlags = qsv_get_d3d11va_bind_flags(hwctx->frame_type);
    }
#endif
#if CONFIG_DXVA2
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
        AVDXVA2FramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
        if (hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET)
            child_frames_hwctx->surface_type = DXVA2_VideoProcessorRenderTarget;
        else
            child_frames_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
    }
#endif

    ret = av_hwframe_ctx_init(child_frames_ref);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing a child frames context\n");
        goto fail;
    }

    /* populate our surfaces' MemIds with handle pairs referencing the child
     * surfaces, and record the resulting libmfx memory type */
#if CONFIG_VAAPI
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
        AVVAAPIFramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
        for (i = 0; i < ctx->initial_pool_size; i++) {
            s->handle_pairs_internal[i].first = child_frames_hwctx->surface_ids + i;
            s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
            s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
        }
        hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
    }
#endif
#if CONFIG_D3D11VA
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
        AVD3D11VAFramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
        for (i = 0; i < ctx->initial_pool_size; i++) {
            s->handle_pairs_internal[i].first = (mfxMemId)child_frames_hwctx->texture_infos[i].texture;
            if(child_frames_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
                // render targets are standalone textures: no array index
                s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
            } else {
                s->handle_pairs_internal[i].second = (mfxMemId)child_frames_hwctx->texture_infos[i].index;
            }
            s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
        }
        if (child_frames_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
            hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
        } else {
            hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
        }
    }
#endif
#if CONFIG_DXVA2
    if (child_device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
        AVDXVA2FramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
        for (i = 0; i < ctx->initial_pool_size; i++) {
            s->handle_pairs_internal[i].first = (mfxMemId)child_frames_hwctx->surfaces[i];
            s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
            s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
        }
        if (child_frames_hwctx->surface_type == DXVA2_VideoProcessorRenderTarget)
            hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
        else
            hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
    }
#endif

    s->child_frames_ref = child_frames_ref;
    child_frames_ref = NULL;

fail:
    av_buffer_unref(&child_device_ref);
    av_buffer_unref(&child_frames_ref);
    return ret;
}
/* Fill one mfxFrameSurface1's Info from the frames context: FourCC, bit
 * depth, chroma format, and 16-aligned Width/Height with the real dimensions
 * in CropW/CropH. The frame rate is an arbitrary value required by VPP. */
static int qsv_init_surface(AVHWFramesContext *ctx, mfxFrameSurface1 *surf)
{
    const AVPixFmtDescriptor *desc;
    uint32_t fourcc;

    desc = av_pix_fmt_desc_get(ctx->sw_format);
    if (!desc)
        return AVERROR(EINVAL);

    fourcc = qsv_fourcc_from_pix_fmt(ctx->sw_format);
    if (!fourcc)
        return AVERROR(EINVAL);

    surf->Info.BitDepthLuma   = desc->comp[0].depth;
    surf->Info.BitDepthChroma = desc->comp[0].depth;
    // libmfx stores >8-bit samples shifted to the MSBs
    surf->Info.Shift          = desc->comp[0].depth > 8;

    if (desc->log2_chroma_w && desc->log2_chroma_h)
        surf->Info.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
    else if (desc->log2_chroma_w)
        surf->Info.ChromaFormat = MFX_CHROMAFORMAT_YUV422;
    else
        surf->Info.ChromaFormat = MFX_CHROMAFORMAT_YUV444;

    surf->Info.FourCC         = fourcc;
    surf->Info.Width          = FFALIGN(ctx->width, 16);
    surf->Info.CropW          = ctx->width;
    surf->Info.Height         = FFALIGN(ctx->height, 16);
    surf->Info.CropH          = ctx->height;
    surf->Info.FrameRateExtN  = 25;  // arbitrary; VPP init needs a nonzero rate
    surf->Info.FrameRateExtD  = 1;
    surf->Info.PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;

    return 0;
}
/* Create the internal fixed-size surface pool: allocate the surface and
 * handle-pair arrays, initialize each surface's Info, create the child
 * frames context for non-opaque memory, and publish the surfaces through
 * the public AVQSVFramesContext. Allocations are cleaned up by
 * qsv_frames_uninit() on failure. */
static int qsv_init_pool(AVHWFramesContext *ctx, uint32_t fourcc)
{
    QSVFramesContext *s = ctx->internal->priv;
    AVQSVFramesContext *frames_hwctx = ctx->hwctx;

    int i, ret = 0;

    if (ctx->initial_pool_size <= 0) {
        av_log(ctx, AV_LOG_ERROR, "QSV requires a fixed frame pool size\n");
        return AVERROR(EINVAL);
    }

    s->handle_pairs_internal = av_calloc(ctx->initial_pool_size,
                                         sizeof(*s->handle_pairs_internal));
    if (!s->handle_pairs_internal)
        return AVERROR(ENOMEM);

    s->surfaces_internal = av_calloc(ctx->initial_pool_size,
                                     sizeof(*s->surfaces_internal));
    if (!s->surfaces_internal)
        return AVERROR(ENOMEM);

    for (i = 0; i < ctx->initial_pool_size; i++) {
        ret = qsv_init_surface(ctx, &s->surfaces_internal[i]);
        if (ret < 0)
            return ret;
    }

    // opaque surfaces are managed by libmfx itself, no child context needed
    if (!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)) {
        ret = qsv_init_child_ctx(ctx);
        if (ret < 0)
            return ret;
    }

    ctx->internal->pool_internal = av_buffer_pool_init2(sizeof(mfxFrameSurface1),
                                                        ctx, qsv_pool_alloc, NULL);
    if (!ctx->internal->pool_internal)
        return AVERROR(ENOMEM);

    frames_hwctx->surfaces    = s->surfaces_internal;
    frames_hwctx->nb_surfaces = ctx->initial_pool_size;

    return 0;
}
/* mfxFrameAllocator.Alloc callback for the internal VPP sessions: instead of
 * allocating anything, hand back the MemIds of the already-existing external
 * surfaces, after checking that the request matches their properties. */
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    AVHWFramesContext    *ctx = pthis;
    QSVFramesContext       *s = ctx->internal->priv;
    AVQSVFramesContext *hwctx = ctx->hwctx;
    mfxFrameInfo *i  = &req->Info;
    mfxFrameInfo *i1 = &hwctx->surfaces[0].Info;

    // only serve external VPP in/out requests; anything else is unexpected
    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
        return MFX_ERR_UNSUPPORTED;
    if (i->Width > i1->Width || i->Height > i1->Height ||
        i->FourCC != i1->FourCC || i->ChromaFormat != i1->ChromaFormat) {
        av_log(ctx, AV_LOG_ERROR, "Mismatching surface properties in an "
               "allocation request: %dx%d %d %d vs %dx%d %d %d\n",
               i->Width,  i->Height,  i->FourCC,  i->ChromaFormat,
               i1->Width, i1->Height, i1->FourCC, i1->ChromaFormat);
        return MFX_ERR_UNSUPPORTED;
    }

    resp->mids           = s->mem_ids;
    resp->NumFrameActual = hwctx->nb_surfaces;

    return MFX_ERR_NONE;
}
/* Free callback: surfaces are externally owned, so nothing to release. */
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    return MFX_ERR_NONE;
}
/* Lock callback: mapping video memory to system memory is not supported
 * through this allocator (transfers go through the VPP sessions instead). */
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}
/* Unlock callback: counterpart of frame_lock, likewise unsupported. */
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}
/* GetHDL callback: the MemId is an mfxHDLPair; copy it into the output
 * handle. MFX_INFINITE in 'second' is our sentinel for "no second handle"
 * (e.g. no D3D11 texture-array index), in which case it is left untouched. */
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    mfxHDLPair *pair_dst = (mfxHDLPair*)hdl;
    mfxHDLPair *pair_src = (mfxHDLPair*)mid;

    pair_dst->first = pair_src->first;

    if (pair_src->second != (mfxMemId)MFX_INFINITE)
        pair_dst->second = pair_src->second;
    return MFX_ERR_NONE;
}
/**
 * Create an internal MFX session whose VPP is used solely to copy frames
 * between system memory and video memory.
 *
 * @param ctx     the QSV frames context
 * @param session receives the new session; set to NULL when the VPP could
 *                not be opened (upload/download then unavailable via VPP,
 *                which is reported as success so callers can fall back)
 * @param upload  nonzero for system->video copies, zero for video->system
 * @return 0 on success or VPP-open failure, AVERROR_UNKNOWN on hard
 *         session-setup failures (the session is closed, not leaked)
 */
static int qsv_init_internal_session(AVHWFramesContext *ctx,
                                     mfxSession *session, int upload)
{
    QSVFramesContext *s = ctx->internal->priv;
    AVQSVFramesContext *frames_hwctx = ctx->hwctx;
    QSVDeviceContext *device_priv = ctx->device_ctx->internal->priv;
    int opaque = !!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);

    mfxFrameAllocator frame_allocator = {
        .pthis  = ctx,
        .Alloc  = frame_alloc,
        .Lock   = frame_lock,
        .Unlock = frame_unlock,
        .GetHDL = frame_get_hdl,
        .Free   = frame_free,
    };

    mfxVideoParam par;
    mfxStatus err;

    err = MFXInit(device_priv->impl, &device_priv->ver, session);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing an internal session\n");
        return AVERROR_UNKNOWN;
    }

    if (device_priv->handle) {
        err = MFXVideoCORE_SetHandle(*session, device_priv->handle_type,
                                     device_priv->handle);
        if (err != MFX_ERR_NONE)
            goto fail;  // previously leaked the session created above
    }

    if (!opaque) {
        err = MFXVideoCORE_SetFrameAllocator(*session, &frame_allocator);
        if (err != MFX_ERR_NONE)
            goto fail;
    }

    memset(&par, 0, sizeof(par));

    if (opaque) {
        par.ExtParam    = s->ext_buffers;
        par.NumExtParam = FF_ARRAY_ELEMS(s->ext_buffers);
        par.IOPattern   = upload ? MFX_IOPATTERN_OUT_OPAQUE_MEMORY :
                                   MFX_IOPATTERN_IN_OPAQUE_MEMORY;
    } else {
        par.IOPattern   = upload ? MFX_IOPATTERN_OUT_VIDEO_MEMORY :
                                   MFX_IOPATTERN_IN_VIDEO_MEMORY;
    }

    // the system-memory side is the input when uploading, output otherwise
    par.IOPattern |= upload ? MFX_IOPATTERN_IN_SYSTEM_MEMORY :
                              MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    par.AsyncDepth = 1;

    par.vpp.In = frames_hwctx->surfaces[0].Info;

    /* Apparently VPP requires the frame rate to be set to some value, otherwise
     * init will fail (probably for the framerate conversion filter). Since we
     * are only doing data upload/download here, we just invent an arbitrary
     * value */
    par.vpp.In.FrameRateExtN = 25;
    par.vpp.In.FrameRateExtD = 1;
    par.vpp.Out = par.vpp.In;

    err = MFXVideoVPP_Init(*session, &par);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_VERBOSE, "Error opening the internal VPP session."
               "Surface upload/download will not be possible\n");
        MFXClose(*session);
        *session = NULL;
    }

    return 0;

fail:
    /* Fix: close the freshly created session on handle/allocator setup
     * failure instead of leaking it and leaving *session dangling. */
    MFXClose(*session);
    *session = NULL;
    return AVERROR_UNKNOWN;
}
/* Frames-init hook: validate the sw_format, create the internal pool when
 * the user did not supply one, and prepare either the opaque-allocation
 * ext buffer (opaque surfaces) or the MemId list used by the internal
 * frame allocator (non-opaque). Sessions themselves are created lazily. */
static int qsv_frames_init(AVHWFramesContext *ctx)
{
    QSVFramesContext *s = ctx->internal->priv;
    AVQSVFramesContext *frames_hwctx = ctx->hwctx;

    int opaque = !!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);

    uint32_t fourcc;
    int i, ret;

    fourcc = qsv_fourcc_from_pix_fmt(ctx->sw_format);
    if (!fourcc) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format\n");
        return AVERROR(ENOSYS);
    }

    if (!ctx->pool) {
        ret = qsv_init_pool(ctx, fourcc);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error creating an internal frame pool\n");
            return ret;
        }
    }

    if (opaque) {
        s->surface_ptrs = av_calloc(frames_hwctx->nb_surfaces,
                                    sizeof(*s->surface_ptrs));
        if (!s->surface_ptrs)
            return AVERROR(ENOMEM);

        for (i = 0; i < frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs[i] = frames_hwctx->surfaces + i;

        s->opaque_alloc.In.Surfaces   = s->surface_ptrs;
        s->opaque_alloc.In.NumSurface = frames_hwctx->nb_surfaces;
        s->opaque_alloc.In.Type       = frames_hwctx->frame_type;

        s->opaque_alloc.Out = s->opaque_alloc.In;

        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);

        s->ext_buffers[0] = (mfxExtBuffer*)&s->opaque_alloc;
    } else {
        s->mem_ids = av_calloc(frames_hwctx->nb_surfaces, sizeof(*s->mem_ids));
        if (!s->mem_ids)
            return AVERROR(ENOMEM);

        for (i = 0; i < frames_hwctx->nb_surfaces; i++)
            s->mem_ids[i] = frames_hwctx->surfaces[i].Data.MemId;
    }

    s->session_download = NULL;
    s->session_upload   = NULL;

    s->session_download_init = 0;
    s->session_upload_init   = 0;

#if HAVE_PTHREADS
    pthread_mutex_init(&s->session_lock, NULL);
#endif

    return 0;
}
/* Hand out one surface from the pool as an AV_PIX_FMT_QSV frame; the
 * mfxFrameSurface1 pointer travels in data[3], as the QSV format defines. */
static int qsv_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    AVBufferRef *buf = av_buffer_pool_get(ctx->pool);

    if (!buf)
        return AVERROR(ENOMEM);

    frame->buf[0]  = buf;
    frame->data[3] = buf->data;
    frame->format  = AV_PIX_FMT_QSV;
    frame->width   = ctx->width;
    frame->height  = ctx->height;

    return 0;
}
/* Report the pixel formats usable for transfers: always exactly the frames
 * context's sw_format, in both directions. The caller frees the list. */
static int qsv_transfer_get_formats(AVHWFramesContext *ctx,
                                    enum AVHWFrameTransferDirection dir,
                                    enum AVPixelFormat **formats)
{
    enum AVPixelFormat *list = av_malloc_array(2, sizeof(*list));

    if (!list)
        return AVERROR(ENOMEM);

    list[0] = ctx->sw_format;
    list[1] = AV_PIX_FMT_NONE;

    *formats = list;
    return 0;
}
/* Derive a child-device (VAAPI/D3D11/DXVA2) frames context from an existing
 * QSV frames context by unwrapping the native handles stored in each
 * surface's MemId (an mfxHDLPair). */
static int qsv_frames_derive_from(AVHWFramesContext *dst_ctx,
                                  AVHWFramesContext *src_ctx, int flags)
{
    AVQSVFramesContext *src_hwctx = src_ctx->hwctx;
    int i;

    switch (dst_ctx->device_ctx->type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
        {
            AVVAAPIFramesContext *dst_hwctx = dst_ctx->hwctx;
            dst_hwctx->surface_ids = av_calloc(src_hwctx->nb_surfaces,
                                               sizeof(*dst_hwctx->surface_ids));
            if (!dst_hwctx->surface_ids)
                return AVERROR(ENOMEM);
            for (i = 0; i < src_hwctx->nb_surfaces; i++) {
                mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
                dst_hwctx->surface_ids[i] = *(VASurfaceID*)pair->first;
            }
            dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        }
        break;
#endif
#if CONFIG_D3D11VA
    case AV_HWDEVICE_TYPE_D3D11VA:
        {
            AVD3D11VAFramesContext *dst_hwctx = dst_ctx->hwctx;
            /* Fix: the original read surfaces[i] with 'i' uninitialized
             * (undefined behavior). All surfaces share one texture array, so
             * the first surface's handle pair is the right source. */
            mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[0].Data.MemId;
            dst_hwctx->texture = (ID3D11Texture2D*)pair->first;
            if (src_hwctx->frame_type & MFX_MEMTYPE_SHARED_RESOURCE)
                dst_hwctx->MiscFlags = D3D11_RESOURCE_MISC_SHARED;
            dst_hwctx->BindFlags = qsv_get_d3d11va_bind_flags(src_hwctx->frame_type);
        }
        break;
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
        {
            AVDXVA2FramesContext *dst_hwctx = dst_ctx->hwctx;
            dst_hwctx->surfaces = av_calloc(src_hwctx->nb_surfaces,
                                            sizeof(*dst_hwctx->surfaces));
            if (!dst_hwctx->surfaces)
                return AVERROR(ENOMEM);
            for (i = 0; i < src_hwctx->nb_surfaces; i++) {
                mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
                dst_hwctx->surfaces[i] = (IDirect3DSurface9*)pair->first;
            }
            dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
            if (src_hwctx->frame_type == MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
                dst_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
            else
                dst_hwctx->surface_type = DXVA2_VideoProcessorRenderTarget;
        }
        break;
#endif
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}
/* Map a QSV frame either to the child hardware format (zero-copy handle
 * unwrap) or, via a dummy child frame, onward to software with the child
 * context's own mapping code. */
static int qsv_map_from(AVHWFramesContext *ctx,
                        AVFrame *dst, const AVFrame *src, int flags)
{
    QSVFramesContext *s = ctx->internal->priv;
    mfxFrameSurface1 *surf = (mfxFrameSurface1*)src->data[3];
    AVHWFramesContext *child_frames_ctx;
    const AVPixFmtDescriptor *desc;
    uint8_t *child_data;
    AVFrame *dummy;
    int ret = 0;

    if (!s->child_frames_ref)
        return AVERROR(ENOSYS);
    child_frames_ctx = (AVHWFramesContext*)s->child_frames_ref->data;

    /* unwrap the native handle for the child device type */
    switch (child_frames_ctx->device_ctx->type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
    {
        mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
        /* pair->first is *VASurfaceID while data[3] in vaapi frame is VASurfaceID, so
         * we need this casting for vaapi.
         * Add intptr_t to force cast from VASurfaceID(uint) type to pointer(long) type
         * to avoid compile warning */
        child_data = (uint8_t*)(intptr_t)*(VASurfaceID*)pair->first;
        break;
    }
#endif
#if CONFIG_D3D11VA
    case AV_HWDEVICE_TYPE_D3D11VA:
    {
        mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
        child_data = pair->first;
        break;
    }
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
    {
        mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
        child_data = pair->first;
        break;
    }
#endif
    default:
        return AVERROR(ENOSYS);
    }

    if (dst->format == child_frames_ctx->format) {
        /* destination is the child hardware format: create a direct mapping */
        ret = ff_hwframe_map_create(s->child_frames_ref,
                                    dst, src, NULL, NULL);
        if (ret < 0)
            return ret;

        dst->width   = src->width;
        dst->height  = src->height;

        if (child_frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
            // D3D11 frames carry texture + array index in data[0]/data[1]
            mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
            dst->data[0] = pair->first;
            dst->data[1] = pair->second;
        } else {
            dst->data[3] = child_data;
        }

        return 0;
    }

    desc = av_pix_fmt_desc_get(dst->format);
    if (desc && desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
        // This only supports mapping to software.
        return AVERROR(ENOSYS);
    }

    /* build a dummy child-format frame around the native handle and let the
     * child context map it to software */
    dummy = av_frame_alloc();
    if (!dummy)
        return AVERROR(ENOMEM);

    dummy->buf[0]        = av_buffer_ref(src->buf[0]);
    dummy->hw_frames_ctx = av_buffer_ref(s->child_frames_ref);
    if (!dummy->buf[0] || !dummy->hw_frames_ctx) {
        /* Fix: the original fell through with ret == 0 here, reporting
         * success on allocation failure. */
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    dummy->format        = child_frames_ctx->format;
    dummy->width         = src->width;
    dummy->height        = src->height;

    if (child_frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
        mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
        dummy->data[0] = pair->first;
        dummy->data[1] = pair->second;
    } else {
        dummy->data[3] = child_data;
    }

    ret = av_hwframe_map(dst, dummy, flags);

fail:
    av_frame_free(&dummy);

    return ret;
}
/* Up/download fallback when no internal VPP session exists: wrap the QSV
 * surface as a frame of the child frames context and delegate the transfer
 * to it. The dummy frame only borrows buf[0]/hw_frames_ctx from the caller,
 * so those fields are cleared before freeing to avoid double-unref. */
static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
                                   const AVFrame *src)
{
    QSVFramesContext *s = ctx->internal->priv;
    AVHWFramesContext *child_frames_ctx = (AVHWFramesContext*)s->child_frames_ref->data;
    int download = !!src->hw_frames_ctx;  // direction: hw src means download
    mfxFrameSurface1 *surf = (mfxFrameSurface1*)(download ? src->data[3] : dst->data[3]);

    AVFrame *dummy;
    int ret;

    dummy = av_frame_alloc();
    if (!dummy)
        return AVERROR(ENOMEM);

    dummy->format        = child_frames_ctx->format;
    dummy->width         = src->width;
    dummy->height        = src->height;
    dummy->buf[0]        = download ? src->buf[0] : dst->buf[0];
    dummy->data[3]       = surf->Data.MemId;
    dummy->hw_frames_ctx = s->child_frames_ref;

    ret = download ? av_hwframe_transfer_data(dst, dummy, 0) :
                     av_hwframe_transfer_data(dummy, src, 0);

    // drop the borrowed references so av_frame_free() does not unref them
    dummy->buf[0]        = NULL;
    dummy->data[3]       = NULL;
    dummy->hw_frames_ctx = NULL;

    av_frame_free(&dummy);

    return ret;
}
/* Point an mfxFrameSurface1's plane pointers at a system-memory AVFrame's
 * data so the VPP can read/write it directly. Returns 0 on success or
 * MFX_ERR_UNSUPPORTED (negative) for pixel formats without a mapping. */
static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
{
    switch (frame->format) {
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_P010:
        surface->Data.Y  = frame->data[0];
        surface->Data.UV = frame->data[1];
        break;

    case AV_PIX_FMT_YUV420P:
        surface->Data.Y = frame->data[0];
        surface->Data.U = frame->data[1];
        surface->Data.V = frame->data[2];
        break;

    case AV_PIX_FMT_BGRA:
        // interleaved: component pointers offset into the single packed plane
        surface->Data.B = frame->data[0];
        surface->Data.G = frame->data[0] + 1;
        surface->Data.R = frame->data[0] + 2;
        surface->Data.A = frame->data[0] + 3;
        break;
#if CONFIG_VAAPI
    case AV_PIX_FMT_YUYV422:
        surface->Data.Y = frame->data[0];
        surface->Data.U = frame->data[0] + 1;
        surface->Data.V = frame->data[0] + 3;
        break;

    case AV_PIX_FMT_Y210:
        // 16-bit packed: offsets are in mfxU16 units
        surface->Data.Y16 = (mfxU16 *)frame->data[0];
        surface->Data.U16 = (mfxU16 *)frame->data[0] + 1;
        surface->Data.V16 = (mfxU16 *)frame->data[0] + 3;
        break;
#endif
    default:
        return MFX_ERR_UNSUPPORTED;
    }
    surface->Data.Pitch     = frame->linesize[0];
    surface->Data.TimeStamp = frame->pts;

    return 0;
}
/* Lazily initialize the upload or download VPP session exactly once.
 * Double-checked under session_lock when pthreads is available. Note the
 * init flag is set even if initialization fails, so a failed attempt is
 * never retried (the session pointer simply stays NULL). */
static int qsv_internal_session_check_init(AVHWFramesContext *ctx, int upload)
{
    QSVFramesContext *s = ctx->internal->priv;
    atomic_int *inited  = upload ? &s->session_upload_init : &s->session_download_init;
    mfxSession *session = upload ? &s->session_upload      : &s->session_download;
    int ret = 0;

    // fast path: already attempted, no lock needed
    if (atomic_load(inited))
        return 0;

#if HAVE_PTHREADS
    pthread_mutex_lock(&s->session_lock);
#endif

    // re-check under the lock in case another thread initialized meanwhile
    if (!atomic_load(inited)) {
        ret = qsv_init_internal_session(ctx, session, upload);
        atomic_store(inited, 1);
    }

#if HAVE_PTHREADS
    pthread_mutex_unlock(&s->session_lock);
#endif

    return ret;
}
/* Download (video memory -> system memory) transfer hook. When the
 * destination is not padded as libmfx requires, downloads into a cached
 * 16-aligned temporary frame first and then copies the visible region out. */
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
                                  const AVFrame *src)
{
    QSVFramesContext  *s = ctx->internal->priv;
    mfxFrameSurface1 out = {{ 0 }};
    mfxFrameSurface1 *in = (mfxFrameSurface1*)src->data[3];

    mfxSyncPoint sync = NULL;
    mfxStatus err;
    int ret = 0;
    /* download to temp frame if the output is not padded as libmfx requires */
    AVFrame *tmp_frame = &s->realigned_download_frame;
    AVFrame *dst_frame;
    int realigned = 0;

    ret = qsv_internal_session_check_init(ctx, 0);
    if (ret < 0)
        return ret;

    /* According to MSDK spec for mfxframeinfo, "Width must be a multiple of 16.
     * Height must be a multiple of 16 for progressive frame sequence and a
     * multiple of 32 otherwise.", so align all frames to 16 before downloading. */
    if (dst->height & 15 || dst->linesize[0] & 15) {
        realigned = 1;
        // (re)allocate the cached temp frame only when its geometry changed
        if (tmp_frame->format != dst->format ||
            tmp_frame->width  != FFALIGN(dst->linesize[0], 16) ||
            tmp_frame->height != FFALIGN(dst->height, 16)) {
            av_frame_unref(tmp_frame);

            tmp_frame->format = dst->format;
            tmp_frame->width  = FFALIGN(dst->linesize[0], 16);
            tmp_frame->height = FFALIGN(dst->height, 16);
            ret = av_frame_get_buffer(tmp_frame, 0);
            if (ret < 0)
                return ret;
        }
    }

    dst_frame = realigned ? tmp_frame : dst;

    if (!s->session_download) {
        if (s->child_frames_ref)
            return qsv_transfer_data_child(ctx, dst_frame, src);

        av_log(ctx, AV_LOG_ERROR, "Surface download not possible\n");
        return AVERROR(ENOSYS);
    }

    out.Info = in->Info;
    /* Fix: the return value was previously ignored; an unsupported format
     * would leave 'out' with NULL plane pointers and crash/fail in VPP. */
    if (map_frame_to_surface(dst_frame, &out) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported format for download\n");
        return AVERROR(ENOSYS);
    }

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0 || !sync) {
        av_log(ctx, AV_LOG_ERROR, "Error downloading the surface\n");
        return AVERROR_UNKNOWN;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session_download, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err);
        return AVERROR_UNKNOWN;
    }

    if (realigned) {
        /* temporarily shrink the temp frame to the visible region for the
         * copy, then restore its aligned geometry for reuse */
        tmp_frame->width  = dst->width;
        tmp_frame->height = dst->height;
        ret = av_frame_copy(dst, tmp_frame);
        tmp_frame->width  = FFALIGN(dst->linesize[0], 16);
        tmp_frame->height = FFALIGN(dst->height, 16);
        if (ret < 0)
            return ret;
    }

    return 0;
}
/* Upload (system memory -> video memory) transfer hook. When the source is
 * not padded as libmfx requires, copies it into a cached 16-aligned temp
 * frame first and replicates edge pixels into the padding. */
static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
                                const AVFrame *src)
{
    QSVFramesContext   *s = ctx->internal->priv;
    mfxFrameSurface1   in = {{ 0 }};
    mfxFrameSurface1 *out = (mfxFrameSurface1*)dst->data[3];
    mfxFrameInfo tmp_info;

    mfxSyncPoint sync = NULL;
    mfxStatus err;
    int ret = 0;
    /* make a copy if the input is not padded as libmfx requires */
    AVFrame *tmp_frame = &s->realigned_upload_frame;
    const AVFrame *src_frame;
    int realigned = 0;

    ret = qsv_internal_session_check_init(ctx, 1);
    if (ret < 0)
        return ret;

    /* According to MSDK spec for mfxframeinfo, "Width must be a multiple of 16.
     * Height must be a multiple of 16 for progressive frame sequence and a
     * multiple of 32 otherwise.", so align all frames to 16 before uploading. */
    if (src->height & 15 || src->linesize[0] & 15) {
        realigned = 1;
        // (re)allocate the cached temp frame only when its geometry changed
        if (tmp_frame->format != src->format ||
            tmp_frame->width  != FFALIGN(src->width, 16) ||
            tmp_frame->height != FFALIGN(src->height, 16)) {
            av_frame_unref(tmp_frame);

            tmp_frame->format = src->format;
            tmp_frame->width  = FFALIGN(src->width, 16);
            tmp_frame->height = FFALIGN(src->height, 16);
            ret = av_frame_get_buffer(tmp_frame, 0);
            if (ret < 0)
                return ret;
        }
        ret = av_frame_copy(tmp_frame, src);
        if (ret < 0) {
            av_frame_unref(tmp_frame);
            return ret;
        }
        // replicate edge pixels into the alignment padding
        ret = qsv_fill_border(tmp_frame, src);
        if (ret < 0) {
            av_frame_unref(tmp_frame);
            return ret;
        }

        // clamp the output crop to the temp frame; restored after the upload
        tmp_info = out->Info;
        out->Info.CropW = FFMIN(out->Info.Width,  tmp_frame->width);
        out->Info.CropH = FFMIN(out->Info.Height, tmp_frame->height);
    }

    src_frame = realigned ? tmp_frame : src;

    if (!s->session_upload) {
        if (s->child_frames_ref)
            return qsv_transfer_data_child(ctx, dst, src_frame);

        av_log(ctx, AV_LOG_ERROR, "Surface upload not possible\n");
        return AVERROR(ENOSYS);
    }

    in.Info = out->Info;
    /* Fix: the return value was previously ignored; an unsupported format
     * would leave 'in' with NULL plane pointers and crash/fail in VPP. */
    if (map_frame_to_surface(src_frame, &in) < 0) {
        if (realigned) {
            out->Info.CropW = tmp_info.CropW;
            out->Info.CropH = tmp_info.CropH;
        }
        av_log(ctx, AV_LOG_ERROR, "Unsupported format for upload\n");
        return AVERROR(ENOSYS);
    }

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0 || !sync) {
        av_log(ctx, AV_LOG_ERROR, "Error uploading the surface\n");
        return AVERROR_UNKNOWN;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session_upload, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation\n");
        return AVERROR_UNKNOWN;
    }

    if (realigned) {
        out->Info.CropW = tmp_info.CropW;
        out->Info.CropH = tmp_info.CropH;
    }

    return 0;
}
/**
 * Derive a QSV frames context on top of an existing child frames context
 * (VAAPI, D3D11VA or DXVA2).
 *
 * Each child surface handle is stored in an mfxHDLPair that is referenced
 * from the Data.MemId of a freshly initialized mfxFrameSurface1; the
 * resulting surface array is published via dst_hwctx->surfaces.
 *
 * Only fixed-size child pools can be wrapped, since the surface array is
 * allocated once up front.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int qsv_frames_derive_to(AVHWFramesContext *dst_ctx,
                                AVHWFramesContext *src_ctx, int flags)
{
    QSVFramesContext *s = dst_ctx->internal->priv;
    AVQSVFramesContext *dst_hwctx = dst_ctx->hwctx;
    int i;

    if (src_ctx->initial_pool_size == 0) {
        av_log(dst_ctx, AV_LOG_ERROR, "Only fixed-size pools can be "
               "mapped to QSV frames.\n");
        return AVERROR(EINVAL);
    }

    switch (src_ctx->device_ctx->type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
    {
        AVVAAPIFramesContext *src_hwctx = src_ctx->hwctx;
        /* Size the pair array by the same count as the surface array: the
         * loop below indexes both with i up to nb_surfaces. (Previously the
         * pair array was sized by initial_pool_size, which overflows if
         * nb_surfaces is larger.) */
        s->handle_pairs_internal = av_calloc(src_hwctx->nb_surfaces,
                                             sizeof(*s->handle_pairs_internal));
        if (!s->handle_pairs_internal)
            return AVERROR(ENOMEM);
        s->surfaces_internal = av_calloc(src_hwctx->nb_surfaces,
                                         sizeof(*s->surfaces_internal));
        if (!s->surfaces_internal)
            return AVERROR(ENOMEM);
        for (i = 0; i < src_hwctx->nb_surfaces; i++) {
            qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
            s->handle_pairs_internal[i].first  = src_hwctx->surface_ids + i;
            s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
            s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
        }
        dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        dst_hwctx->frame_type  = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
    }
    break;
#endif
#if CONFIG_D3D11VA
    case AV_HWDEVICE_TYPE_D3D11VA:
    {
        AVD3D11VAFramesContext *src_hwctx = src_ctx->hwctx;
        s->handle_pairs_internal = av_calloc(src_ctx->initial_pool_size,
                                             sizeof(*s->handle_pairs_internal));
        if (!s->handle_pairs_internal)
            return AVERROR(ENOMEM);
        s->surfaces_internal = av_calloc(src_ctx->initial_pool_size,
                                         sizeof(*s->surfaces_internal));
        if (!s->surfaces_internal)
            return AVERROR(ENOMEM);
        for (i = 0; i < src_ctx->initial_pool_size; i++) {
            qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
            s->handle_pairs_internal[i].first = (mfxMemId)src_hwctx->texture_infos[i].texture;
            /* For render targets the texture stands alone; otherwise the
             * second member carries the array-texture subresource index. */
            if (src_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
                s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
            } else {
                s->handle_pairs_internal[i].second = (mfxMemId)src_hwctx->texture_infos[i].index;
            }
            s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
        }
        dst_hwctx->nb_surfaces = src_ctx->initial_pool_size;
        if (src_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
            dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
        } else {
            dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
        }
    }
    break;
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
    {
        AVDXVA2FramesContext *src_hwctx = src_ctx->hwctx;
        /* As in the VAAPI branch, allocate the pair array with the loop
         * bound (nb_surfaces), not initial_pool_size. */
        s->handle_pairs_internal = av_calloc(src_hwctx->nb_surfaces,
                                             sizeof(*s->handle_pairs_internal));
        if (!s->handle_pairs_internal)
            return AVERROR(ENOMEM);
        s->surfaces_internal = av_calloc(src_hwctx->nb_surfaces,
                                         sizeof(*s->surfaces_internal));
        if (!s->surfaces_internal)
            return AVERROR(ENOMEM);
        for (i = 0; i < src_hwctx->nb_surfaces; i++) {
            qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
            s->handle_pairs_internal[i].first  = (mfxMemId)src_hwctx->surfaces[i];
            s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
            s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
        }
        dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        if (src_hwctx->surface_type == DXVA2_VideoProcessorRenderTarget)
            dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
        else
            dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
    }
    break;
#endif
    default:
        return AVERROR(ENOSYS);
    }

    dst_hwctx->surfaces = s->surfaces_internal;

    return 0;
}
/**
 * Map a child-format frame (VAAPI/D3D11/DXVA2) to a QSV frame by locating
 * the surface in the derived frames context that wraps the same underlying
 * handle. On success dst->data[3] points at that mfxFrameSurface1.
 *
 * Returns 0 on success, AVERROR(EINVAL) if the source surface is not part
 * of the mapped frames context, or the error from ff_hwframe_map_create().
 */
static int qsv_map_to(AVHWFramesContext *dst_ctx,
                      AVFrame *dst, const AVFrame *src, int flags)
{
    AVQSVFramesContext *hwctx = dst_ctx->hwctx;
    int i, err, index = -1;

    for (i = 0; i < hwctx->nb_surfaces && index < 0; i++) {
        switch (src->format) {
#if CONFIG_VAAPI
        case AV_PIX_FMT_VAAPI:
        {
            mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
            /* VAAPI frames carry the VASurfaceID in data[3]. */
            if (*(VASurfaceID*)pair->first == (VASurfaceID)src->data[3])
                index = i;
            /* Break unconditionally: without it, a non-matching VAAPI
             * surface fell through into the other format cases and was
             * compared against unrelated data[] entries. */
            break;
        }
#endif
#if CONFIG_D3D11VA
        case AV_PIX_FMT_D3D11:
        {
            mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
            /* D3D11 frames carry texture in data[0], index in data[1]. */
            if (pair->first  == src->data[0] &&
                pair->second == src->data[1])
                index = i;
            break;
        }
#endif
#if CONFIG_DXVA2
        case AV_PIX_FMT_DXVA2_VLD:
        {
            mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
            /* DXVA2 frames carry the IDirect3DSurface9 in data[3]. */
            if (pair->first == src->data[3])
                index = i;
            break;
        }
#endif
        default:
            break;
        }
    }
    if (index < 0) {
        av_log(dst_ctx, AV_LOG_ERROR, "Trying to map from a surface which "
               "is not in the mapped frames context.\n");
        return AVERROR(EINVAL);
    }

    err = ff_hwframe_map_create(dst->hw_frames_ctx,
                                dst, src, NULL, NULL);
    if (err)
        return err;

    dst->width   = src->width;
    dst->height  = src->height;
    dst->data[3] = (uint8_t*)&hwctx->surfaces[index];

    return 0;
}
/**
 * Report the pixel-format constraints for QSV frames: all entries of
 * supported_pixel_formats as software formats, and AV_PIX_FMT_QSV as the
 * only hardware format. Both lists are AV_PIX_FMT_NONE-terminated.
 *
 * Returns 0 on success or AVERROR(ENOMEM); on failure the caller releases
 * whatever was already allocated into the constraints.
 */
static int qsv_frames_get_constraints(AVHWDeviceContext *ctx,
                                      const void *hwconfig,
                                      AVHWFramesConstraints *constraints)
{
    const int nb_sw_formats = FF_ARRAY_ELEMS(supported_pixel_formats);
    int fmt_idx;

    constraints->valid_sw_formats =
        av_malloc_array(nb_sw_formats + 1, sizeof(*constraints->valid_sw_formats));
    if (!constraints->valid_sw_formats)
        return AVERROR(ENOMEM);

    for (fmt_idx = 0; fmt_idx < nb_sw_formats; fmt_idx++)
        constraints->valid_sw_formats[fmt_idx] = supported_pixel_formats[fmt_idx].pix_fmt;
    constraints->valid_sw_formats[nb_sw_formats] = AV_PIX_FMT_NONE;

    constraints->valid_hw_formats =
        av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
    if (!constraints->valid_hw_formats)
        return AVERROR(ENOMEM);

    constraints->valid_hw_formats[0] = AV_PIX_FMT_QSV;
    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;

    return 0;
}
/**
 * Free callback for QSV device contexts created by qsv_device_create():
 * closes the MFX session (if one was opened) and drops the reference to
 * the child device context before freeing the private struct.
 */
static void qsv_device_free(AVHWDeviceContext *ctx)
{
    QSVDevicePriv *priv = ctx->user_opaque;
    AVQSVDeviceContext *hwctx = ctx->hwctx;

    if (hwctx->session)
        MFXClose(hwctx->session);

    av_buffer_unref(&priv->child_device_ctx);
    av_freep(&priv);
}
/**
 * Translate a user-supplied device string into an mfxIMPL value.
 *
 * Known names ("auto", "sw", "hw", ...) map to their MFX_IMPL_* constants;
 * any other non-NULL string is parsed as a number via strtol(); NULL
 * selects MFX_IMPL_AUTO_ANY. For hardware implementations on Windows child
 * devices, the matching VIA flag (D3D11 or D3D9) is OR-ed in.
 */
static mfxIMPL choose_implementation(const char *device, enum AVHWDeviceType child_device_type)
{
    static const struct {
        const char *name;
        mfxIMPL impl;
    } impl_map[] = {
        { "auto",     MFX_IMPL_AUTO         },
        { "sw",       MFX_IMPL_SOFTWARE     },
        { "hw",       MFX_IMPL_HARDWARE     },
        { "auto_any", MFX_IMPL_AUTO_ANY     },
        { "hw_any",   MFX_IMPL_HARDWARE_ANY },
        { "hw2",      MFX_IMPL_HARDWARE2    },
        { "hw3",      MFX_IMPL_HARDWARE3    },
        { "hw4",      MFX_IMPL_HARDWARE4    },
    };
    mfxIMPL impl = MFX_IMPL_AUTO_ANY;

    if (device) {
        int matched = 0;
        int idx;

        for (idx = 0; idx < FF_ARRAY_ELEMS(impl_map) && !matched; idx++) {
            if (strcmp(device, impl_map[idx].name) == 0) {
                impl    = impl_map[idx].impl;
                matched = 1;
            }
        }

        /* Unrecognized name: interpret the string as a numeric value. */
        if (!matched)
            impl = strtol(device, NULL, 0);
    }

    if (impl != MFX_IMPL_SOFTWARE) {
        if (child_device_type == AV_HWDEVICE_TYPE_D3D11VA)
            impl |= MFX_IMPL_VIA_D3D11;
        else if (child_device_type == AV_HWDEVICE_TYPE_DXVA2)
            impl |= MFX_IMPL_VIA_D3D9;
    }

    return impl;
}
/**
 * Open hwctx->session on top of an already-created child device (VAAPI
 * display, D3D11 device, or D3D9 device manager).
 *
 * The session is first opened with the minimum required API version (1.3)
 * just to query the library's actual version, then reopened with that
 * version so the full feature set is negotiated, and finally bound to the
 * child device handle.
 *
 * Returns 0 on success or a negative AVERROR code; on failure the session
 * is closed and the handle cleared.
 */
static int qsv_device_derive_from_child(AVHWDeviceContext *ctx,
                                        mfxIMPL implementation,
                                        AVHWDeviceContext *child_device_ctx,
                                        int flags)
{
    AVQSVDeviceContext *hwctx = ctx->hwctx;

    mfxVersion    ver = { { 3, 1 } };
    mfxHDL        handle;
    mfxHandleType handle_type;
    mfxStatus     err;
    int ret;

    switch (child_device_ctx->type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
    {
        AVVAAPIDeviceContext *child_device_hwctx = child_device_ctx->hwctx;
        handle_type = MFX_HANDLE_VA_DISPLAY;
        handle = (mfxHDL)child_device_hwctx->display;
    }
    break;
#endif
#if CONFIG_D3D11VA
    case AV_HWDEVICE_TYPE_D3D11VA:
    {
        AVD3D11VADeviceContext *child_device_hwctx = child_device_ctx->hwctx;
        handle_type = MFX_HANDLE_D3D11_DEVICE;
        handle = (mfxHDL)child_device_hwctx->device;
    }
    break;
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
    {
        AVDXVA2DeviceContext *child_device_hwctx = child_device_ctx->hwctx;
        handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
        handle = (mfxHDL)child_device_hwctx->devmgr;
    }
    break;
#endif
    default:
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    err = MFXInit(implementation, &ver, &hwctx->session);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing an MFX session: "
               "%d.\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    err = MFXQueryVersion(hwctx->session, &ver);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error querying an MFX session: %d.\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    av_log(ctx, AV_LOG_VERBOSE,
           "Initialize MFX session: API version is %d.%d, implementation version is %d.%d\n",
           MFX_VERSION_MAJOR, MFX_VERSION_MINOR, ver.Major, ver.Minor);

    /* Reopen the session with the queried version. Clear the handle after
     * closing so that, if the second MFXInit fails without setting it, the
     * fail path (and qsv_device_free()) cannot MFXClose() a stale,
     * already-closed session. */
    MFXClose(hwctx->session);
    hwctx->session = NULL;

    err = MFXInit(implementation, &ver, &hwctx->session);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR,
               "Error initializing an MFX session: %d.\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    err = MFXVideoCORE_SetHandle(hwctx->session, handle_type, handle);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error setting child device handle: "
               "%d\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    return 0;

fail:
    if (hwctx->session)
        MFXClose(hwctx->session);
    /* Avoid a second MFXClose() from qsv_device_free() when the caller
     * subsequently frees the device context. */
    hwctx->session = NULL;
    return ret;
}
/**
 * Derive a QSV device from an existing child device context. Derivation
 * always targets a hardware implementation ("hw_any"), routed via the
 * child's API (D3D11/D3D9) where applicable.
 */
static int qsv_device_derive(AVHWDeviceContext *ctx,
                             AVHWDeviceContext *child_device_ctx,
                             AVDictionary *opts, int flags)
{
    mfxIMPL impl = choose_implementation("hw_any", child_device_ctx->type);

    return qsv_device_derive_from_child(ctx, impl, child_device_ctx, flags);
}
/**
 * Create a QSV device from scratch: pick a child device type (from the
 * "child_device_type" option or a compile-time default), create that child
 * device, then derive the MFX session from it.
 *
 * @param device  implementation selector passed to choose_implementation()
 *                ("auto", "hw", a numeric value, ...), may be NULL
 * @param opts    recognized keys: "child_device_type", "child_device"
 * Returns 0 on success or a negative AVERROR code.
 */
static int qsv_device_create(AVHWDeviceContext *ctx, const char *device,
AVDictionary *opts, int flags)
{
QSVDevicePriv *priv;
enum AVHWDeviceType child_device_type;
AVHWDeviceContext *child_device;
AVDictionary *child_device_opts;
AVDictionaryEntry *e;
mfxIMPL impl;
int ret;
priv = av_mallocz(sizeof(*priv));
if (!priv)
return AVERROR(ENOMEM);
/* Install the free callback immediately so priv (and any child device
 * created below) is released even on early failure paths. */
ctx->user_opaque = priv;
ctx->free = qsv_device_free;
/* Child device type: explicit option first, then compile-time defaults
 * in priority order VAAPI > DXVA2 > D3D11VA. */
e = av_dict_get(opts, "child_device_type", NULL, 0);
if (e) {
child_device_type = av_hwdevice_find_type_by_name(e->value);
if (child_device_type == AV_HWDEVICE_TYPE_NONE) {
av_log(ctx, AV_LOG_ERROR, "Unknown child device type "
"\"%s\".\n", e->value);
return AVERROR(EINVAL);
}
} else if (CONFIG_VAAPI) {
child_device_type = AV_HWDEVICE_TYPE_VAAPI;
} else if (CONFIG_DXVA2) {
/* NOTE(review): logs against NULL rather than ctx — ctx would give
 * the message a proper context prefix; confirm before changing. */
av_log(NULL, AV_LOG_WARNING,
"WARNING: defaulting child_device_type to AV_HWDEVICE_TYPE_DXVA2 for compatibility "
"with old commandlines. This behaviour will be removed "
"in the future. Please explicitly set device type via \"-init_hw_device\" option.\n");
child_device_type = AV_HWDEVICE_TYPE_DXVA2;
} else if (CONFIG_D3D11VA) {
child_device_type = AV_HWDEVICE_TYPE_D3D11VA;
} else {
av_log(ctx, AV_LOG_ERROR, "No supported child device type is enabled\n");
return AVERROR(ENOSYS);
}
child_device_opts = NULL;
switch (child_device_type) {
#if CONFIG_VAAPI
case AV_HWDEVICE_TYPE_VAAPI:
{
// libmfx does not actually implement VAAPI properly, rather it
// depends on the specific behaviour of a matching iHD driver when
// used on recent Intel hardware. Set options to the VAAPI device
// creation so that we should pick a usable setup by default if
// possible, even when multiple devices and drivers are available.
av_dict_set(&child_device_opts, "kernel_driver", "i915", 0);
av_dict_set(&child_device_opts, "driver", "iHD", 0);
}
break;
#endif
#if CONFIG_D3D11VA
case AV_HWDEVICE_TYPE_D3D11VA:
break;
#endif
#if CONFIG_DXVA2
case AV_HWDEVICE_TYPE_DXVA2:
break;
#endif
default:
{
av_log(ctx, AV_LOG_ERROR, "No supported child device type is enabled\n");
return AVERROR(ENOSYS);
}
break;
}
/* Create the child device ("child_device" selects a specific one, e.g.
 * a DRM render node path or an adapter index), then derive from it. */
e = av_dict_get(opts, "child_device", NULL, 0);
ret = av_hwdevice_ctx_create(&priv->child_device_ctx, child_device_type,
e ? e->value : NULL, child_device_opts, 0);
av_dict_free(&child_device_opts);
if (ret < 0)
return ret;
child_device = (AVHWDeviceContext*)priv->child_device_ctx->data;
impl = choose_implementation(device, child_device_type);
return qsv_device_derive_from_child(ctx, impl, child_device, 0);
}
/* Hardware-context backend descriptor for Intel Quick Sync Video (libmfx):
 * wires the QSV implementation callbacks defined in this file into the
 * generic hwcontext framework. */
const HWContextType ff_hwcontext_type_qsv = {
.type = AV_HWDEVICE_TYPE_QSV,
.name = "QSV",
.device_hwctx_size = sizeof(AVQSVDeviceContext),
.device_priv_size = sizeof(QSVDeviceContext),
.frames_hwctx_size = sizeof(AVQSVFramesContext),
.frames_priv_size = sizeof(QSVFramesContext),
.device_create = qsv_device_create,
.device_derive = qsv_device_derive,
.device_init = qsv_device_init,
.frames_get_constraints = qsv_frames_get_constraints,
.frames_init = qsv_frames_init,
.frames_uninit = qsv_frames_uninit,
.frames_get_buffer = qsv_get_buffer,
.transfer_get_formats = qsv_transfer_get_formats,
.transfer_data_to = qsv_transfer_data_to,
.transfer_data_from = qsv_transfer_data_from,
.map_to = qsv_map_to,
.map_from = qsv_map_from,
.frames_derive_to = qsv_frames_derive_to,
.frames_derive_from = qsv_frames_derive_from,
/* The only hardware pixel format exposed by this backend. */
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_QSV, AV_PIX_FMT_NONE },
};