dnn: extract common functions used by different filters

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Author: Guo, Yejun <yejun.guo@intel.com>
Date: 2021-01-26 13:35:30 +08:00
parent 995c33a046
commit bdce636100
6 changed files with 201 additions and 157 deletions
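After this change every DNN filter keeps its backend, model and option state in a single DnnContext and calls the shared helpers instead of driving DNNModule/DNNModel by hand. A minimal sketch of the resulting filter skeleton; ExampleDnnContext and the example_* function names are hypothetical, while the ff_dnn_* helpers are the ones added in this commit:

#include "avfilter.h"
#include "dnn_filter_common.h"
#include "internal.h"
#include "video.h"

typedef struct ExampleDnnContext {
    const AVClass *class;
    DnnContext dnnctx;              // all backend/model/option state lives here now
} ExampleDnnContext;

static av_cold int example_init(AVFilterContext *ctx)
{
    ExampleDnnContext *s = ctx->priv;
    // validates the model/input/output options, creates the backend module, loads the model
    return ff_dnn_init(&s->dnnctx, ctx);
}

static int example_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ExampleDnnContext *s = ctx->priv;
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    // one call replaces the per-filter (dnn_module->execute_model)(...) boilerplate
    if (ff_dnn_execute_model(&s->dnnctx, in, out) != DNN_SUCCESS) {
        av_frame_free(&in);
        av_frame_free(&out);
        return AVERROR(EIO);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void example_uninit(AVFilterContext *ctx)
{
    ExampleDnnContext *s = ctx->priv;
    ff_dnn_uninit(&s->dnnctx);      // frees the model and the module wrapper
}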

libavfilter/Makefile

@@ -27,6 +27,7 @@ OBJS-$(HAVE_THREADS) += pthread.o
# subsystems
OBJS-$(CONFIG_QSVVPP) += qsvvpp.o
OBJS-$(CONFIG_SCENE_SAD) += scene_sad.o
OBJS-$(CONFIG_DNN) += dnn_filter_common.o
include $(SRC_PATH)/libavfilter/dnn/Makefile
# audio filters

libavfilter/dnn_filter_common.c (new file)

@@ -0,0 +1,106 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dnn_filter_common.h"
int ff_dnn_init(DnnContext *ctx, AVFilterContext *filter_ctx)
{
if (!ctx->model_filename) {
av_log(filter_ctx, AV_LOG_ERROR, "model file for network is not specified\n");
return AVERROR(EINVAL);
}
if (!ctx->model_inputname) {
av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
return AVERROR(EINVAL);
}
if (!ctx->model_outputname) {
av_log(filter_ctx, AV_LOG_ERROR, "output name of the model network is not specified\n");
return AVERROR(EINVAL);
}
ctx->dnn_module = ff_get_dnn_module(ctx->backend_type);
if (!ctx->dnn_module) {
av_log(filter_ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
return AVERROR(ENOMEM);
}
if (!ctx->dnn_module->load_model) {
av_log(filter_ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
return AVERROR(EINVAL);
}
ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename, ctx->backend_options, filter_ctx);
if (!ctx->model) {
av_log(filter_ctx, AV_LOG_ERROR, "could not load DNN model\n");
return AVERROR(EINVAL);
}
if (!ctx->dnn_module->execute_model_async && ctx->async) {
ctx->async = 0;
av_log(filter_ctx, AV_LOG_WARNING, "this backend does not support async execution, roll back to sync.\n");
}
#if !HAVE_PTHREAD_CANCEL
if (ctx->async) {
ctx->async = 0;
av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
}
#endif
return 0;
}
DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input)
{
return ctx->model->get_input(ctx->model->model, input, ctx->model_inputname);
}
DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
{
return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
ctx->model_outputname, output_width, output_height);
}
DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
{
return (ctx->dnn_module->execute_model)(ctx->model, ctx->model_inputname, in_frame,
(const char **)&ctx->model_outputname, 1, out_frame);
}
DNNReturnType ff_dnn_execute_model_async(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
{
return (ctx->dnn_module->execute_model_async)(ctx->model, ctx->model_inputname, in_frame,
(const char **)&ctx->model_outputname, 1, out_frame);
}
DNNAsyncStatusType ff_dnn_get_async_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
{
return (ctx->dnn_module->get_async_result)(ctx->model, in_frame, out_frame);
}
DNNReturnType ff_dnn_flush(DnnContext *ctx)
{
return (ctx->dnn_module->flush)(ctx->model);
}
void ff_dnn_uninit(DnnContext *ctx)
{
if (ctx->dnn_module) {
(ctx->dnn_module->free_model)(&ctx->model);
av_freep(&ctx->dnn_module);
}
}
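ff_dnn_get_input() wraps model->get_input() with the stored input name, so a filter's config_input can validate the negotiated link against the model without touching the backend structs. A rough sketch, assuming DNNData exposes width and height as declared in dnn_interface.h (with -1 meaning the dimension is not fixed by the model); ExampleDnnContext is the hypothetical context from the sketch above:

static int example_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ExampleDnnContext *s = ctx->priv;
    DNNData model_input;

    if (ff_dnn_get_input(&s->dnnctx, &model_input) != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
        return AVERROR(EIO);
    }

    if (model_input.width != -1 && model_input.width != inlink->w) {
        av_log(ctx, AV_LOG_ERROR, "the model expects frame width %d, got %d\n",
               model_input.width, inlink->w);
        return AVERROR(EIO);
    }
    if (model_input.height != -1 && model_input.height != inlink->h) {
        av_log(ctx, AV_LOG_ERROR, "the model expects frame height %d, got %d\n",
               model_input.height, inlink->h);
        return AVERROR(EIO);
    }
    return 0;
}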

libavfilter/dnn_filter_common.h (new file)

@@ -0,0 +1,59 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* common functions for the dnn based filters
*/
#ifndef AVFILTER_DNN_FILTER_COMMON_H
#define AVFILTER_DNN_FILTER_COMMON_H
#include "dnn_interface.h"
typedef struct DnnContext {
char *model_filename;
DNNBackendType backend_type;
char *model_inputname;
char *model_outputname;
char *backend_options;
int async;
DNNModule *dnn_module;
DNNModel *model;
} DnnContext;
#define DNN_COMMON_OPTIONS \
{ "model", "path to model file", OFFSET(model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "input", "input name of the model", OFFSET(model_inputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "output", "output name of the model", OFFSET(model_outputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "backend_configs", "backend configs", OFFSET(backend_options), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "options", "backend configs", OFFSET(backend_options), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "async", "use DNN async inference", OFFSET(async), AV_OPT_TYPE_BOOL, { .i64 = 1}, 0, 1, FLAGS},
int ff_dnn_init(DnnContext *ctx, AVFilterContext *filter_ctx);
DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input);
DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height);
DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
DNNReturnType ff_dnn_execute_model_async(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
DNNAsyncStatusType ff_dnn_get_async_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame);
DNNReturnType ff_dnn_flush(DnnContext *ctx);
void ff_dnn_uninit(DnnContext *ctx);
#endif
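The DNN_COMMON_OPTIONS entries deliberately use bare member names (model_filename, async, ...), so each filter decides where they land by defining OFFSET(); vf_dnn_processing.c below points it at its embedded DnnContext with offsetof(..., dnnctx.x). A sketch of that pattern with a hypothetical filter context:

#include <limits.h>
#include <stddef.h>
#include "libavutil/opt.h"
#include "dnn_filter_common.h"

typedef struct ExampleDnnContext {
    const AVClass *class;
    DnnContext dnnctx;
    // filter-specific fields would follow
} ExampleDnnContext;

// OFFSET(model_filename) expands to offsetof(ExampleDnnContext, dnnctx.model_filename),
// so the generic entries from DNN_COMMON_OPTIONS address the embedded DnnContext.
#define OFFSET(x) offsetof(ExampleDnnContext, dnnctx.x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM

static const AVOption example_options[] = {
    { "dnn_backend", "DNN backend", OFFSET(backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS, "backend" },
    DNN_COMMON_OPTIONS
    { NULL }
};

Filters that want different defaults, such as vf_derain.c and vf_sr.c which default input/output to "x"/"y", keep spelling out the individual entries instead of using the macro.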

libavfilter/vf_derain.c

@@ -27,18 +27,14 @@
#include "libavformat/avio.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "dnn_interface.h"
#include "dnn_filter_common.h"
#include "formats.h"
#include "internal.h"
typedef struct DRContext {
const AVClass *class;
DnnContext dnnctx;
int filter_type;
char *model_filename;
DNNBackendType backend_type;
DNNModule *dnn_module;
DNNModel *model;
} DRContext;
#define OFFSET(x) offsetof(DRContext, x)
@@ -47,12 +43,14 @@ static const AVOption derain_options[] = {
{ "filter_type", "filter type(derain/dehaze)", OFFSET(filter_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "type" },
{ "derain", "derain filter flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "type" },
{ "dehaze", "dehaze filter flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "type" },
{ "dnn_backend", "DNN backend", OFFSET(backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
{ "dnn_backend", "DNN backend", OFFSET(dnnctx.backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
{ "native", "native backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
{ "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "backend" },
#endif
{ "model", "path to model file", OFFSET(model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "model", "path to model file", OFFSET(dnnctx.model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "input", "input name of the model", OFFSET(dnnctx.model_inputname), AV_OPT_TYPE_STRING, { .str = "x" }, 0, 0, FLAGS },
{ "output", "output name of the model", OFFSET(dnnctx.model_outputname), AV_OPT_TYPE_STRING, { .str = "y" }, 0, 0, FLAGS },
{ NULL }
};
@@ -77,7 +75,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = ctx->outputs[0];
DRContext *dr_context = ctx->priv;
DNNReturnType dnn_result;
const char *model_output_name = "y";
AVFrame *out;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
@@ -88,7 +85,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
dnn_result = (dr_context->dnn_module->execute_model)(dr_context->model, "x", in, &model_output_name, 1, out);
dnn_result = ff_dnn_execute_model(&dr_context->dnnctx, in, out);
if (dnn_result != DNN_SUCCESS){
av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
av_frame_free(&in);
@@ -103,38 +100,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static av_cold int init(AVFilterContext *ctx)
{
DRContext *dr_context = ctx->priv;
dr_context->dnn_module = ff_get_dnn_module(dr_context->backend_type);
if (!dr_context->dnn_module) {
av_log(ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
return AVERROR(ENOMEM);
}
if (!dr_context->model_filename) {
av_log(ctx, AV_LOG_ERROR, "model file for network is not specified\n");
return AVERROR(EINVAL);
}
if (!dr_context->dnn_module->load_model) {
av_log(ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
return AVERROR(EINVAL);
}
dr_context->model = (dr_context->dnn_module->load_model)(dr_context->model_filename, NULL, NULL);
if (!dr_context->model) {
av_log(ctx, AV_LOG_ERROR, "could not load DNN model\n");
return AVERROR(EINVAL);
}
return 0;
return ff_dnn_init(&dr_context->dnnctx, ctx);
}
static av_cold void uninit(AVFilterContext *ctx)
{
DRContext *dr_context = ctx->priv;
if (dr_context->dnn_module) {
(dr_context->dnn_module->free_model)(&dr_context->model);
av_freep(&dr_context->dnn_module);
}
ff_dnn_uninit(&dr_context->dnnctx);
}
static const AVFilterPad derain_inputs[] = {

libavfilter/vf_dnn_processing.c

@@ -29,7 +29,7 @@
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "filters.h"
#include "dnn_interface.h"
#include "dnn_filter_common.h"
#include "formats.h"
#include "internal.h"
#include "libswscale/swscale.h"
@@ -37,22 +37,12 @@
typedef struct DnnProcessingContext {
const AVClass *class;
char *model_filename;
DNNBackendType backend_type;
char *model_inputname;
char *model_outputname;
char *backend_options;
int async;
DNNModule *dnn_module;
DNNModel *model;
DnnContext dnnctx;
struct SwsContext *sws_uv_scale;
int sws_uv_height;
} DnnProcessingContext;
#define OFFSET(x) offsetof(DnnProcessingContext, x)
#define OFFSET(x) offsetof(DnnProcessingContext, dnnctx.x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption dnn_processing_options[] = {
{ "dnn_backend", "DNN backend", OFFSET(backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS, "backend" },
@@ -63,11 +53,7 @@ static const AVOption dnn_processing_options[] = {
#if (CONFIG_LIBOPENVINO == 1)
{ "openvino", "openvino backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "backend" },
#endif
{ "model", "path to model file", OFFSET(model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "input", "input name of the model", OFFSET(model_inputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "output", "output name of the model", OFFSET(model_outputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "options", "backend options", OFFSET(backend_options), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "async", "use DNN async inference", OFFSET(async), AV_OPT_TYPE_BOOL, { .i64 = 1}, 0, 1, FLAGS},
DNN_COMMON_OPTIONS
{ NULL }
};
@@ -76,49 +62,7 @@ AVFILTER_DEFINE_CLASS(dnn_processing);
static av_cold int init(AVFilterContext *context)
{
DnnProcessingContext *ctx = context->priv;
if (!ctx->model_filename) {
av_log(ctx, AV_LOG_ERROR, "model file for network is not specified\n");
return AVERROR(EINVAL);
}
if (!ctx->model_inputname) {
av_log(ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
return AVERROR(EINVAL);
}
if (!ctx->model_outputname) {
av_log(ctx, AV_LOG_ERROR, "output name of the model network is not specified\n");
return AVERROR(EINVAL);
}
ctx->dnn_module = ff_get_dnn_module(ctx->backend_type);
if (!ctx->dnn_module) {
av_log(ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
return AVERROR(ENOMEM);
}
if (!ctx->dnn_module->load_model) {
av_log(ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
return AVERROR(EINVAL);
}
ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename, ctx->backend_options, context);
if (!ctx->model) {
av_log(ctx, AV_LOG_ERROR, "could not load DNN model\n");
return AVERROR(EINVAL);
}
if (!ctx->dnn_module->execute_model_async && ctx->async) {
ctx->async = 0;
av_log(ctx, AV_LOG_WARNING, "this backend does not support async execution, roll back to sync.\n");
}
#if !HAVE_PTHREAD_CANCEL
if (ctx->async) {
ctx->async = 0;
av_log(ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
}
#endif
return 0;
return ff_dnn_init(&ctx->dnnctx, context);
}
static int query_formats(AVFilterContext *context)
@@ -199,7 +143,7 @@ static int config_input(AVFilterLink *inlink)
DNNData model_input;
int check;
result = ctx->model->get_input(ctx->model->model, &model_input, ctx->model_inputname);
result = ff_dnn_get_input(&ctx->dnnctx, &model_input);
if (result != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
return AVERROR(EIO);
@@ -259,8 +203,7 @@ static int config_output(AVFilterLink *outlink)
AVFilterLink *inlink = context->inputs[0];
// have a try run in case that the dnn model resize the frame
result = ctx->model->get_output(ctx->model->model, ctx->model_inputname, inlink->w, inlink->h,
ctx->model_outputname, &outlink->w, &outlink->h);
result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &outlink->w, &outlink->h);
if (result != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
return AVERROR(EIO);
@@ -314,8 +257,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
dnn_result = (ctx->dnn_module->execute_model)(ctx->model, ctx->model_inputname, in,
(const char **)&ctx->model_outputname, 1, out);
dnn_result = ff_dnn_execute_model(&ctx->dnnctx, in, out);
if (dnn_result != DNN_SUCCESS){
av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
av_frame_free(&in);
@@ -376,7 +318,7 @@ static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_pts)
int ret;
DNNAsyncStatusType async_state;
ret = (ctx->dnn_module->flush)(ctx->model);
ret = ff_dnn_flush(&ctx->dnnctx);
if (ret != DNN_SUCCESS) {
return -1;
}
@@ -384,7 +326,7 @@ static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_pts)
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = (ctx->dnn_module->get_async_result)(ctx->model, &in_frame, &out_frame);
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
if (isPlanarYUV(in_frame->format))
copy_uv_planes(ctx, out_frame, in_frame);
@@ -405,7 +347,7 @@ static int activate_async(AVFilterContext *filter_ctx)
{
AVFilterLink *inlink = filter_ctx->inputs[0];
AVFilterLink *outlink = filter_ctx->outputs[0];
DnnProcessingContext *ctx = (DnnProcessingContext *)filter_ctx->priv;
DnnProcessingContext *ctx = filter_ctx->priv;
AVFrame *in = NULL, *out = NULL;
int64_t pts;
int ret, status;
@@ -426,8 +368,7 @@ static int activate_async(AVFilterContext *filter_ctx)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
if ((ctx->dnn_module->execute_model_async)(ctx->model, ctx->model_inputname, in,
(const char **)&ctx->model_outputname, 1, out) != DNN_SUCCESS) {
if (ff_dnn_execute_model_async(&ctx->dnnctx, in, out) != DNN_SUCCESS) {
return AVERROR(EIO);
}
}
@@ -437,7 +378,7 @@ static int activate_async(AVFilterContext *filter_ctx)
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = (ctx->dnn_module->get_async_result)(ctx->model, &in_frame, &out_frame);
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
if (isPlanarYUV(in_frame->format))
copy_uv_planes(ctx, out_frame, in_frame);
@@ -471,7 +412,7 @@ static int activate(AVFilterContext *filter_ctx)
{
DnnProcessingContext *ctx = filter_ctx->priv;
if (ctx->async)
if (ctx->dnnctx.async)
return activate_async(filter_ctx);
else
return activate_sync(filter_ctx);
@@ -482,11 +423,7 @@ static av_cold void uninit(AVFilterContext *ctx)
DnnProcessingContext *context = ctx->priv;
sws_freeContext(context->sws_uv_scale);
if (context->dnn_module)
(context->dnn_module->free_model)(&context->model);
av_freep(&context->dnn_module);
ff_dnn_uninit(&context->dnnctx);
}
static const AVFilterPad dnn_processing_inputs[] = {
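vf_dnn_processing.c keeps its async mode, but the calls now go through the same wrapper: ff_dnn_execute_model_async() queues a frame pair, ff_dnn_get_async_result() hands back finished pairs, and ff_dnn_flush() pushes out whatever the backend is still holding at EOF. A simplified sketch of the drain step; unlike the real flush_frame() it does not wait (av_usleep) for results that are still in flight, it only illustrates the call order:

static int example_drain(AVFilterLink *outlink, DnnContext *dnnctx)
{
    if (ff_dnn_flush(dnnctx) != DNN_SUCCESS)
        return AVERROR(EIO);

    for (;;) {
        AVFrame *in_frame  = NULL;
        AVFrame *out_frame = NULL;
        int ret;

        // hands back ownership of a completed (input, output) frame pair, if any
        ff_dnn_get_async_result(dnnctx, &in_frame, &out_frame);
        if (!out_frame)
            break;

        ret = ff_filter_frame(outlink, out_frame);
        av_frame_free(&in_frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}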

libavfilter/vf_sr.c

@@ -32,15 +32,11 @@
#include "libavutil/pixdesc.h"
#include "libavformat/avio.h"
#include "libswscale/swscale.h"
#include "dnn_interface.h"
#include "dnn_filter_common.h"
typedef struct SRContext {
const AVClass *class;
char *model_filename;
DNNBackendType backend_type;
DNNModule *dnn_module;
DNNModel *model;
DnnContext dnnctx;
int scale_factor;
struct SwsContext *sws_uv_scale;
int sws_uv_height;
@@ -50,13 +46,15 @@ typedef struct SRContext {
#define OFFSET(x) offsetof(SRContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption sr_options[] = {
{ "dnn_backend", "DNN backend used for model execution", OFFSET(backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
{ "dnn_backend", "DNN backend used for model execution", OFFSET(dnnctx.backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
{ "native", "native backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
{ "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "backend" },
#endif
{ "scale_factor", "scale factor for SRCNN model", OFFSET(scale_factor), AV_OPT_TYPE_INT, { .i64 = 2 }, 2, 4, FLAGS },
{ "model", "path to model file specifying network architecture and its parameters", OFFSET(model_filename), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
{ "model", "path to model file specifying network architecture and its parameters", OFFSET(dnnctx.model_filename), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
{ "input", "input name of the model", OFFSET(dnnctx.model_inputname), AV_OPT_TYPE_STRING, { .str = "x" }, 0, 0, FLAGS },
{ "output", "output name of the model", OFFSET(dnnctx.model_outputname), AV_OPT_TYPE_STRING, { .str = "y" }, 0, 0, FLAGS },
{ NULL }
};
@@ -65,28 +63,7 @@ AVFILTER_DEFINE_CLASS(sr);
static av_cold int init(AVFilterContext *context)
{
SRContext *sr_context = context->priv;
sr_context->dnn_module = ff_get_dnn_module(sr_context->backend_type);
if (!sr_context->dnn_module){
av_log(context, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
return AVERROR(ENOMEM);
}
if (!sr_context->model_filename){
av_log(context, AV_LOG_ERROR, "model file for network was not specified\n");
return AVERROR(EIO);
}
if (!sr_context->dnn_module->load_model) {
av_log(context, AV_LOG_ERROR, "load_model for network was not specified\n");
return AVERROR(EIO);
}
sr_context->model = (sr_context->dnn_module->load_model)(sr_context->model_filename, NULL, NULL);
if (!sr_context->model){
av_log(context, AV_LOG_ERROR, "could not load DNN model\n");
return AVERROR(EIO);
}
return 0;
return ff_dnn_init(&sr_context->dnnctx, context);
}
static int query_formats(AVFilterContext *context)
@@ -114,8 +91,7 @@ static int config_output(AVFilterLink *outlink)
int out_width, out_height;
// have a try run in case that the dnn model resize the frame
result = ctx->model->get_output(ctx->model->model, "x", inlink->w, inlink->h,
"y", &out_width, &out_height);
result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &out_width, &out_height);
if (result != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
return AVERROR(EIO);
@@ -155,7 +131,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = context->outputs[0];
AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
DNNReturnType dnn_result;
const char *model_output_name = "y";
if (!out){
av_log(context, AV_LOG_ERROR, "could not allocate memory for output frame\n");
@@ -168,11 +143,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
sws_scale(ctx->sws_pre_scale,
(const uint8_t **)in->data, in->linesize, 0, in->height,
out->data, out->linesize);
dnn_result = (ctx->dnn_module->execute_model)(ctx->model, "x", out,
(const char **)&model_output_name, 1, out);
dnn_result = ff_dnn_execute_model(&ctx->dnnctx, out, out);
} else {
dnn_result = (ctx->dnn_module->execute_model)(ctx->model, "x", in,
(const char **)&model_output_name, 1, out);
dnn_result = ff_dnn_execute_model(&ctx->dnnctx, in, out);
}
if (dnn_result != DNN_SUCCESS){
@@ -197,11 +170,7 @@ static av_cold void uninit(AVFilterContext *context)
{
SRContext *sr_context = context->priv;
if (sr_context->dnn_module){
(sr_context->dnn_module->free_model)(&sr_context->model);
av_freep(&sr_context->dnn_module);
}
ff_dnn_uninit(&sr_context->dnnctx);
sws_freeContext(sr_context->sws_uv_scale);
sws_freeContext(sr_context->sws_pre_scale);
}
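For both vf_dnn_processing.c and vf_sr.c the output link is still sized with a "try run": ff_dnn_get_output() pushes the input geometry through the model once at configure time, so models that resize the frame (super resolution in particular) get a correctly sized outlink before any real frame arrives. A sketch of that config_output step, again using the hypothetical ExampleDnnContext from the earlier sketches:

static int example_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ExampleDnnContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    // try run: feed the input dimensions through the model to learn the output dimensions
    if (ff_dnn_get_output(&s->dnnctx, inlink->w, inlink->h,
                          &outlink->w, &outlink->h) != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
        return AVERROR(EIO);
    }
    return 0;
}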