avcodec/videotoolbox: add hevc support

Signed-off-by: Aman Gupta <aman@tmm1.net>
Author: Aman Gupta <aman@tmm1.net>
Date: 2017-09-26 11:30:28 -07:00
Parent: c32077c0ee
Commit: 3d4f8b9184

6 changed files with 219 additions and 2 deletions
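
The new hwaccel follows the existing h264_videotoolbox one: configure probes CoreMedia/CMFormatDescription.h for kCMVideoCodecType_HEVC (videotoolbox.c defines a fallback enum for SDKs that lack it), allcodecs.c registers hevc_videotoolbox, hevcdec.c offers AV_PIX_FMT_VIDEOTOOLBOX from get_format() for both the 8-bit and 10-bit cases, and videotoolbox.c gains ff_videotoolbox_hvcc_extradata_create(), which serializes the first VPS/SPS and all stored PPSs into an hvcC record and hands it to VideoToolbox as the "hvcC" decoder configuration. The H.264 start_frame/decode_slice callbacks are reused to buffer the bitstream; only end_frame is HEVC-specific. In hevc_refs.c, frames whose buf[0] is still a one-byte placeholder (no decoded pixel buffer attached yet) are held back from output.

A rough usage sketch, not part of this commit: on an Apple build with --enable-videotoolbox, an application can opt in from its get_format() callback using the existing av_videotoolbox_default_init() helper declared in libavcodec/videotoolbox.h. The callback name and fallback policy below are illustrative only.

#include <libavcodec/avcodec.h>
#include <libavcodec/videotoolbox.h>

/* Hypothetical get_format() callback: prefer VideoToolbox when the HEVC
 * decoder offers it, otherwise fall back to the first (software) format. */
static enum AVPixelFormat pick_videotoolbox(AVCodecContext *avctx,
                                            const enum AVPixelFormat *fmts)
{
    for (int i = 0; fmts[i] != AV_PIX_FMT_NONE; i++) {
        if (fmts[i] == AV_PIX_FMT_VIDEOTOOLBOX &&
            av_videotoolbox_default_init(avctx) >= 0)
            return AV_PIX_FMT_VIDEOTOOLBOX;   /* hardware session created */
    }
    return fmts[0];                           /* software decoding */
}

/* Set avctx->get_format = pick_videotoolbox before avcodec_open2(), and call
 * av_videotoolbox_default_free(avctx) once decoding is finished. */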

configure

@@ -2081,6 +2081,7 @@ TOOLCHAIN_FEATURES="
TYPES_LIST="
CONDITION_VARIABLE_Ptr
kCMVideoCodecType_HEVC
socklen_t
struct_addrinfo
struct_group_source_req
@@ -2696,6 +2697,8 @@ hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC"
hevc_vaapi_hwaccel_select="hevc_decoder"
hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC"
hevc_vdpau_hwaccel_select="hevc_decoder"
hevc_videotoolbox_hwaccel_deps="videotoolbox"
hevc_videotoolbox_hwaccel_select="hevc_decoder"
mjpeg_cuvid_hwaccel_deps="cuda cuvid"
mjpeg_cuvid_hwaccel_select="mjpeg_cuvid_decoder"
mpeg_xvmc_hwaccel_deps="xvmc"
@@ -5814,8 +5817,10 @@ enabled avfoundation && {
check_lib avfoundation CoreGraphics/CoreGraphics.h CGGetActiveDisplayList "-framework CoreGraphics" ||
check_lib avfoundation ApplicationServices/ApplicationServices.h CGGetActiveDisplayList "-framework ApplicationServices"; }
-enabled videotoolbox &&
+enabled videotoolbox && {
check_lib coreservices CoreServices/CoreServices.h UTGetOSTypeFromString "-framework CoreServices"
+check_func_headers CoreMedia/CMFormatDescription.h kCMVideoCodecType_HEVC "-framework CoreMedia"
+}
check_struct "sys/time.h sys/resource.h" "struct rusage" ru_maxrss

libavcodec/allcodecs.c

@@ -84,6 +84,7 @@ static void register_all(void)
REGISTER_HWACCEL(HEVC_QSV, hevc_qsv);
REGISTER_HWACCEL(HEVC_VAAPI, hevc_vaapi);
REGISTER_HWACCEL(HEVC_VDPAU, hevc_vdpau);
REGISTER_HWACCEL(HEVC_VIDEOTOOLBOX, hevc_videotoolbox);
REGISTER_HWACCEL(MJPEG_CUVID, mjpeg_cuvid);
REGISTER_HWACCEL(MPEG1_CUVID, mpeg1_cuvid);
REGISTER_HWACCEL(MPEG1_XVMC, mpeg1_xvmc);

libavcodec/hevc_refs.c

@@ -208,6 +208,9 @@ int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
if (nb_output) {
HEVCFrame *frame = &s->DPB[min_idx];
if (frame->frame->format == AV_PIX_FMT_VIDEOTOOLBOX && frame->frame->buf[0]->size == 1)
return 0;
ret = av_frame_ref(out, frame->frame);
if (frame->flags & HEVC_FRAME_FLAG_BUMPING)
ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_BUMPING);

libavcodec/hevcdec.c

@@ -352,7 +352,11 @@ static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps,
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
{
-#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + CONFIG_HEVC_D3D11VA_HWACCEL * 2 + CONFIG_HEVC_VAAPI_HWACCEL + CONFIG_HEVC_VDPAU_HWACCEL)
+#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
+CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
+CONFIG_HEVC_VAAPI_HWACCEL + \
+CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
+CONFIG_HEVC_VDPAU_HWACCEL)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
switch (sps->pix_fmt) {
@@ -370,6 +374,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
#endif
#if CONFIG_HEVC_VDPAU_HWACCEL
*fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
break;
case AV_PIX_FMT_YUV420P10:
@@ -382,6 +389,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
*fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
break;
}

libavcodec/vda_vt_internal.h

@@ -59,4 +59,5 @@ int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size);
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx);
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx);
#endif /* AVCODEC_VDA_VT_INTERNAL_H */

libavcodec/videotoolbox.c

@@ -32,6 +32,7 @@
#include "libavutil/hwcontext.h"
#include "bytestream.h"
#include "h264dec.h"
#include "hevcdec.h"
#include "mpegvideo.h"
#include <TargetConditionals.h>
@@ -39,6 +40,10 @@
# define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif
#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
@@ -115,6 +120,164 @@ CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
return data;
}
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
{
HEVCContext *h = avctx->priv_data;
const HEVCVPS *vps = (const HEVCVPS *)h->ps.vps_list[0]->data;
const HEVCSPS *sps = (const HEVCSPS *)h->ps.sps_list[0]->data;
int i, num_pps = 0;
const HEVCPPS *pps = h->ps.pps;
PTLCommon ptlc = vps->ptl.general_ptl;
VUI vui = sps->vui;
uint8_t parallelismType;
CFDataRef data = NULL;
uint8_t *p;
int vt_extradata_size = 23 + 5 + vps->data_size + 5 + sps->data_size + 3;
uint8_t *vt_extradata;
for (i = 0; i < MAX_PPS_COUNT; i++) {
if (h->ps.pps_list[i]) {
const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
vt_extradata_size += 2 + pps->data_size;
num_pps++;
}
}
vt_extradata = av_malloc(vt_extradata_size);
if (!vt_extradata)
return NULL;
p = vt_extradata;
/* unsigned int(8) configurationVersion = 1; */
AV_W8(p + 0, 1);
/*
* unsigned int(2) general_profile_space;
* unsigned int(1) general_tier_flag;
* unsigned int(5) general_profile_idc;
*/
AV_W8(p + 1, ptlc.profile_space << 6 |
ptlc.tier_flag << 5 |
ptlc.profile_idc);
/* unsigned int(32) general_profile_compatibility_flags; */
memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
/* unsigned int(48) general_constraint_indicator_flags; */
AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
ptlc.interlaced_source_flag << 6 |
ptlc.non_packed_constraint_flag << 5 |
ptlc.frame_only_constraint_flag << 4);
AV_W8(p + 7, 0);
AV_WN32(p + 8, 0);
/* unsigned int(8) general_level_idc; */
AV_W8(p + 12, ptlc.level_idc);
/*
* bit(4) reserved = 1111b;
* unsigned int(12) min_spatial_segmentation_idc;
*/
AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
/*
* bit(6) reserved = 111111b;
* unsigned int(2) parallelismType;
*/
if (!vui.min_spatial_segmentation_idc)
parallelismType = 0;
else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
parallelismType = 0;
else if (pps->entropy_coding_sync_enabled_flag)
parallelismType = 3;
else if (pps->tiles_enabled_flag)
parallelismType = 2;
else
parallelismType = 1;
AV_W8(p + 15, 0xfc | parallelismType);
/*
* bit(6) reserved = 111111b;
* unsigned int(2) chromaFormat;
*/
AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
/*
* bit(5) reserved = 11111b;
* unsigned int(3) bitDepthLumaMinus8;
*/
AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
/*
* bit(5) reserved = 11111b;
* unsigned int(3) bitDepthChromaMinus8;
*/
AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
/* bit(16) avgFrameRate; */
AV_WB16(p + 19, 0);
/*
* bit(2) constantFrameRate;
* bit(3) numTemporalLayers;
* bit(1) temporalIdNested;
* unsigned int(2) lengthSizeMinusOne;
*/
AV_W8(p + 21, 0 << 6 |
sps->max_sub_layers << 3 |
sps->temporal_id_nesting_flag << 2 |
3);
/* unsigned int(8) numOfArrays; */
AV_W8(p + 22, 3);
p += 23;
/* vps */
/*
* bit(1) array_completeness;
* unsigned int(1) reserved = 0;
* unsigned int(6) NAL_unit_type;
*/
AV_W8(p, 1 << 7 |
HEVC_NAL_VPS & 0x3f);
/* unsigned int(16) numNalus; */
AV_WB16(p + 1, 1);
/* unsigned int(16) nalUnitLength; */
AV_WB16(p + 3, vps->data_size);
/* bit(8*nalUnitLength) nalUnit; */
memcpy(p + 5, vps->data, vps->data_size);
p += 5 + vps->data_size;
/* sps */
AV_W8(p, 1 << 7 |
HEVC_NAL_SPS & 0x3f);
AV_WB16(p + 1, 1);
AV_WB16(p + 3, sps->data_size);
memcpy(p + 5, sps->data, sps->data_size);
p += 5 + sps->data_size;
/* pps */
AV_W8(p, 1 << 7 |
HEVC_NAL_PPS & 0x3f);
AV_WB16(p + 1, num_pps);
p += 3;
for (i = 0; i < MAX_PPS_COUNT; i++) {
if (h->ps.pps_list[i]) {
const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
AV_WB16(p, pps->data_size);
memcpy(p + 2, pps->data, pps->data_size);
p += 2 + pps->data_size;
}
}
av_assert0(p - vt_extradata == vt_extradata_size);
data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
av_free(vt_extradata);
return data;
}
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
{
av_buffer_unref(&frame->buf[0]);
@@ -445,6 +608,18 @@ static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
return videotoolbox_common_end_frame(avctx, frame);
}
static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
{
HEVCContext *h = avctx->priv_data;
AVFrame *frame = h->ref->frame;
VTContext *vtctx = avctx->internal->hwaccel_priv_data;
int ret;
ret = videotoolbox_common_end_frame(avctx, frame);
vtctx->bitstream_size = 0;
return ret;
}
static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
@@ -501,6 +676,11 @@ static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec
if (data)
CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
break;
case kCMVideoCodecType_HEVC :
data = ff_videotoolbox_hvcc_extradata_create(avctx);
if (data)
CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
break;
default:
break;
}
@@ -600,6 +780,9 @@ static int videotoolbox_default_init(AVCodecContext *avctx)
case AV_CODEC_ID_H264 :
videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
break;
case AV_CODEC_ID_HEVC :
videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
break;
case AV_CODEC_ID_MPEG1VIDEO :
videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
break;
@@ -782,6 +965,20 @@ AVHWAccel ff_h263_videotoolbox_hwaccel = {
.priv_data_size = sizeof(VTContext),
};
AVHWAccel ff_hevc_videotoolbox_hwaccel = {
.name = "hevc_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_HEVC,
.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = ff_videotoolbox_h264_start_frame,
.decode_slice = ff_videotoolbox_h264_decode_slice,
.end_frame = videotoolbox_hevc_end_frame,
.init = videotoolbox_common_init,
.uninit = ff_videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
AVHWAccel ff_h264_videotoolbox_hwaccel = {
.name = "h264_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,