Mirror of https://github.com/Swiftgram/Telegram-iOS.git (synced 2025-06-15 21:45:19 +00:00)

Update ffmpeg

This commit is contained in:
parent df9e27f4fb
commit 03b4a57a59
@ -49,17 +49,15 @@ CONFIGURE_FLAGS="--enable-cross-compile --disable-programs \
--enable-libdav1d \
--enable-audiotoolbox \
--enable-bsf=aac_adtstoasc,vp9_superframe,h264_mp4toannexb \
--enable-decoder=h264,libvpx_vp9,hevc,libopus,mp3,aac,flac,alac_at,pcm_s16le,pcm_s24le,pcm_f32le,gsm_ms_at,vorbis,libdav1d \
--enable-decoder=h264,libvpx_vp9,hevc,libopus,mp3,aac,flac,alac_at,pcm_s16le,pcm_s24le,pcm_f32le,gsm_ms_at,vorbis,libdav1d,av1 \
--enable-encoder=libvpx_vp9,aac_at \
--enable-demuxer=aac,mov,m4v,mp3,ogg,libopus,flac,wav,aiff,matroska,mpegts \
--enable-demuxer=aac,mov,m4v,mp3,ogg,libopus,flac,wav,aiff,matroska,mpegts, \
--enable-parser=aac,h264,mp3,libopus \
--enable-protocol=file \
--enable-muxer=mp4,matroska,mpegts \
--enable-hwaccel=h264_videotoolbox,hevc_videotoolbox,av1_videotoolbox \
"

#--enable-hwaccel=h264_videotoolbox,hevc_videotoolbox \

EXTRA_CFLAGS="-DCONFIG_SAFE_BITSTREAM_READER=1"

if [ "$1" = "debug" ];
@ -1,6 +1,8 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version <next>:

version 7.1:
- Raw Captions with Time (RCWT) closed caption demuxer
- LC3/LC3plus decoding/encoding using external library liblc3
@ -206,7 +206,6 @@ Codecs:
libgsm.c Michel Bardiaux
libkvazaar.c Arttu Ylä-Outinen
libopenh264enc.c Martin Storsjo, Linjie Fu
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw
libtheoraenc.c David Conrad
libvorbis.c David Conrad
@ -391,7 +390,7 @@ Muxers/Demuxers:
dss.c Oleksij Rempel
dtsdec.c foo86
dv.c Roman Shaposhnik
dvdvideodec.c Marth64
dvdvideodec.c [2] Marth64
electronicarts.c Peter Ross
evc* Samsung (Dawid Kozinski)
ffm* Baptiste Coudurier
@ -445,7 +444,8 @@ Muxers/Demuxers:
pva.c Ivo van Poorten
r3d.c Baptiste Coudurier
raw.c Michael Niedermayer
rcwtenc.c Marth64
rcwtdec.c [2] Marth64
rcwtenc.c [2] Marth64
rdt.c Ronald S. Bultje
rl2.c Sascha Sommer
rmdec.c, rmenc.c Ronald S. Bultje
@ -1 +1 @@
7.1
7.0.git

@ -1,15 +0,0 @@

┌──────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 7.1 "Péter" │
└──────────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 7.1 "Péter", about 6
months after the release of FFmpeg 7.0.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.libera.chat) or ask
on the mailing-lists.

@ -1 +0,0 @@
7.1
@ -467,6 +467,8 @@ Optimization options (experts only):
--disable-neon disable NEON optimizations
--disable-dotprod disable DOTPROD optimizations
--disable-i8mm disable I8MM optimizations
--disable-sve disable SVE optimizations
--disable-sve2 disable SVE2 optimizations
--disable-inline-asm disable use of inline assembly
--disable-x86asm disable use of standalone x86 assembly
--disable-mipsdsp disable MIPS DSP ASE R1 optimizations
@ -2163,6 +2165,8 @@ ARCH_EXT_LIST_ARM="
vfp
vfpv3
setend
sve
sve2
"

ARCH_EXT_LIST_MIPS="
@ -2432,6 +2436,8 @@ TOOLCHAIN_FEATURES="
as_arch_directive
as_archext_dotprod_directive
as_archext_i8mm_directive
as_archext_sve_directive
as_archext_sve2_directive
as_dn_directive
as_fpu_directive
as_func
@ -2461,6 +2467,7 @@ TYPES_LIST="
kCMVideoCodecType_HEVC
kCMVideoCodecType_HEVCWithAlpha
kCMVideoCodecType_VP9
kCMVideoCodecType_AV1
kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange
kCVPixelFormatType_422YpCbCr8BiPlanarVideoRange
kCVPixelFormatType_422YpCbCr10BiPlanarVideoRange
@ -2752,6 +2759,8 @@ vfpv3_deps="vfp"
setend_deps="arm"
dotprod_deps="aarch64 neon"
i8mm_deps="aarch64 neon"
sve_deps="aarch64 neon"
sve2_deps="aarch64 neon sve"

map 'eval ${v}_inline_deps=inline_asm' $ARCH_EXT_LIST_ARM

@ -3166,6 +3175,8 @@ av1_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferAV1_bit_depth_idx"
av1_vaapi_hwaccel_select="av1_decoder"
av1_vdpau_hwaccel_deps="vdpau VdpPictureInfoAV1"
av1_vdpau_hwaccel_select="av1_decoder"
av1_videotoolbox_hwaccel_deps="videotoolbox"
av1_videotoolbox_hwaccel_select="av1_decoder"
av1_vulkan_hwaccel_deps="vulkan"
av1_vulkan_hwaccel_select="av1_decoder"
h263_vaapi_hwaccel_deps="vaapi"
@ -6234,9 +6245,11 @@ if enabled aarch64; then
# internal assembler in clang 3.3 does not support this instruction
enabled neon && check_insn neon 'ext v0.8B, v0.8B, v1.8B, #1'

archext_list="dotprod i8mm"
archext_list="dotprod i8mm sve sve2"
enabled dotprod && check_archext_insn dotprod 'udot v0.4s, v0.16b, v0.16b'
enabled i8mm && check_archext_insn i8mm 'usdot v0.4s, v0.16b, v0.16b'
enabled sve && check_archext_insn sve 'whilelt p0.s, x0, x1'
enabled sve2 && check_archext_insn sve2 'sqrdmulh z0.s, z0.s, z0.s'

# Disable the main feature (e.g. HAVE_NEON) if neither inline nor external
# assembly support the feature out of the box. Skip this for the features
@ -6480,6 +6493,7 @@ check_cc intrinsics_sse2 emmintrin.h "__m128i test = _mm_setzero_si128()"

check_ldflags -Wl,--as-needed
check_ldflags -Wl,-z,noexecstack
check_ldflags -Wl,-no_warn_duplicate_libraries

if ! disabled network; then
check_func getaddrinfo $network_extralibs
@ -6697,6 +6711,7 @@ enabled videotoolbox && {
check_func_headers CoreMedia/CMFormatDescription.h kCMVideoCodecType_HEVC "-framework CoreMedia"
check_func_headers CoreMedia/CMFormatDescription.h kCMVideoCodecType_HEVCWithAlpha "-framework CoreMedia"
check_func_headers CoreMedia/CMFormatDescription.h kCMVideoCodecType_VP9 "-framework CoreMedia"
check_func_headers CoreMedia/CMFormatDescription.h kCMVideoCodecType_AV1 "-framework CoreMedia"
check_func_headers CoreVideo/CVPixelBuffer.h kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange "-framework CoreVideo"
check_func_headers CoreVideo/CVPixelBuffer.h kCVPixelFormatType_422YpCbCr8BiPlanarVideoRange "-framework CoreVideo"
check_func_headers CoreVideo/CVPixelBuffer.h kCVPixelFormatType_422YpCbCr10BiPlanarVideoRange "-framework CoreVideo"
@ -7919,6 +7934,8 @@ if enabled aarch64; then
echo "NEON enabled ${neon-no}"
echo "DOTPROD enabled ${dotprod-no}"
echo "I8MM enabled ${i8mm-no}"
echo "SVE enabled ${sve-no}"
echo "SVE2 enabled ${sve2-no}"
fi
if enabled arm; then
echo "ARMv5TE enabled ${armv5te-no}"
9 submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/doc/.gitignore vendored Normal file
@ -0,0 +1,9 @@
/*.1
/*.3
/*.html
/*.pod
/config.texi
/avoptions_codec.texi
/avoptions_format.texi
/fate.txt
/print_options
@ -2,6 +2,37 @@ The last version increases of all libraries were on 2024-03-07

API changes, most recent first:

2024-10-xx - xxxxxxxxxx - lavu 59.41.100 - log.h
Add AVClass.state_flags_offset and AV_CLASS_STATE_INITIALIZED.

2024-09-30 - xxxxxxxxxx - lavf 61.9.100 - avformat.h
Add {nb_}coded_side_data to AVStreamGroupTileGrid.

2024-09-xx - xxxxxxxxxx - lavu 59
Deprecate av_int_list_length_for_size(), av_int_list_length(), and
av_opt_set_int_list() without replacement. All AVOptions using these
should be replaced with AV_OPT_TYPE_FLAG_ARRAY.

2024-09-xx - xxxxxxxxxx - lavfi 10.6.100
Buffersink now has array-type options
- pixel_formats
- colorspaces
- colorranges
replacing the int-list options
- pix_fmts
- color_spaces
- color_ranges
abuffersink now has array-type options
- sample_formats
- samplerates
- channel_layouts
replacing the int-list/string options
- sample_fmts
- sample_rates
- ch_layouts
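A minimal sketch of the new call pattern, pieced together from the doc/examples hunks further down in this diff (the option names, the "s16"/"mono"/8000 values and the av_opt_set_array() call mirror the filtering_audio.c changes; the wrapper function itself is only illustrative):

#include <libavfilter/avfilter.h>
#include <libavutil/opt.h>

/* Configure an abuffersink with the array-type options instead of the
 * deprecated av_opt_set_int_list() int-list options. */
static int config_audio_sink(AVFilterGraph *filter_graph, AVFilterContext **buffersink_ctx)
{
    static const int out_sample_rate = 8000;
    const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
    int ret;

    *buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "out");
    if (!*buffersink_ctx)
        return AVERROR(ENOMEM);

    /* string-valued options replace the old sample_fmts / ch_layouts lists */
    ret = av_opt_set(*buffersink_ctx, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        return ret;
    ret = av_opt_set(*buffersink_ctx, "channel_layouts", "mono", AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        return ret;

    /* samplerates goes through the generic array setter */
    ret = av_opt_set_array(*buffersink_ctx, "samplerates", AV_OPT_SEARCH_CHILDREN,
                           0, 1, AV_OPT_TYPE_INT, &out_sample_rate);
    if (ret < 0)
        return ret;

    /* the sink still has to be initialized explicitly */
    return avfilter_init_dict(*buffersink_ctx, NULL);
}

This is the same migration that the filtering_audio.c hunk below performs in place.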

-------- 8< --------- FFmpeg 7.1 was cut here -------- 8< ---------

2024-09-23 - 6940a6de2f0 - lavu 59.38.100 - frame.h
Add AV_FRAME_DATA_VIEW_ID.
@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
# could be handy for archiving the generated documentation or if some version
# control system is used.

PROJECT_NUMBER = 7.1
PROJECT_NUMBER =

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
1 submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/doc/doxy/.gitignore vendored Normal file
@ -0,0 +1 @@
/html/
25 submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/doc/examples/.gitignore vendored Normal file
@ -0,0 +1,25 @@
/avio_list_dir
/avio_reading
/decode_audio
/decode_video
/demuxing_decoding
/encode_audio
/encode_video
/extract_mvs
/filter_audio
/filtering_audio
/filtering_video
/http_multiclient
/hw_decode
/metadata
/muxing
/pc-uninstalled
/qsvdec
/remuxing
/resampling_audio
/scaling_video
/transcode_aac
/transcoding
/vaapi_encode
/vaapi_transcode
/qsv_transcode
@ -96,8 +96,7 @@ static int init_filters(const char *filters_descr)
const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
static const int out_sample_rates[] = { 8000, -1 };
static const int out_sample_rate = 8000;
const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;

@ -123,34 +122,40 @@ static int init_filters(const char *filters_descr)
}

/* buffer audio sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "out");
if (!buffersink_ctx) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
ret = AVERROR(ENOMEM);
goto end;
}

ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
AV_OPT_SEARCH_CHILDREN);
ret = av_opt_set(buffersink_ctx, "sample_formats", "s16",
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
}

ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono",
AV_OPT_SEARCH_CHILDREN);
ret = av_opt_set(buffersink_ctx, "channel_layouts", "mono",
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
}

ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
AV_OPT_SEARCH_CHILDREN);
ret = av_opt_set_array(buffersink_ctx, "samplerates", AV_OPT_SEARCH_CHILDREN,
0, 1, AV_OPT_TYPE_INT, &out_sample_rate);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
}

ret = avfilter_init_dict(buffersink_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
goto end;
}

/*
* Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr.
@ -99,7 +99,6 @@ static int init_filters(const char *filters_descr)
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
@ -122,20 +121,26 @@ static int init_filters(const char *filters_descr)
}

/* buffer video sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
if (!buffersink_ctx) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
ret = AVERROR(ENOMEM);
goto end;
}

ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
ret = av_opt_set(buffersink_ctx, "pixel_formats", "gray8",
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}

ret = avfilter_init_dict(buffersink_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
goto end;
}

/*
* Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr.
@ -171,23 +171,38 @@ static int open_output_file(const char *filename)
|
||||
* sample rate etc.). These properties can be changed for output
|
||||
* streams easily using filters */
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
const enum AVPixelFormat *pix_fmts = NULL;
|
||||
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
|
||||
|
||||
ret = avcodec_get_supported_config(dec_ctx, NULL,
|
||||
AV_CODEC_CONFIG_PIX_FORMAT, 0,
|
||||
(const void**)&pix_fmts, NULL);
|
||||
|
||||
/* take first format from list of supported formats */
|
||||
if (encoder->pix_fmts)
|
||||
enc_ctx->pix_fmt = encoder->pix_fmts[0];
|
||||
else
|
||||
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
|
||||
enc_ctx->pix_fmt = (ret >= 0 && pix_fmts) ?
|
||||
pix_fmts[0] : dec_ctx->pix_fmt;
|
||||
|
||||
/* video time_base can be set to whatever is handy and supported by encoder */
|
||||
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
|
||||
} else {
|
||||
const enum AVSampleFormat *sample_fmts = NULL;
|
||||
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = avcodec_get_supported_config(dec_ctx, NULL,
|
||||
AV_CODEC_CONFIG_SAMPLE_FORMAT, 0,
|
||||
(const void**)&sample_fmts, NULL);
|
||||
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = encoder->sample_fmts[0];
|
||||
enc_ctx->sample_fmt = (ret >= 0 && sample_fmts) ?
|
||||
sample_fmts[0] : dec_ctx->sample_fmt;
|
||||
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
}
|
||||
|
||||
@ -283,10 +298,10 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
@ -297,6 +312,12 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
char buf[64];
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
@ -322,10 +343,10 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
@ -352,6 +373,15 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (enc_ctx->frame_size > 0)
|
||||
av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
|
@ -21,22 +21,24 @@ ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_url}@}
inputs - including live grabbing/recording devices - filter, and transcode them
into a plethora of output formats.

@command{ffmpeg} reads from an arbitrary number of input "files" (which can be regular
@command{ffmpeg} reads from an arbitrary number of inputs (which can be regular
files, pipes, network streams, grabbing devices, etc.), specified by the
@code{-i} option, and writes to an arbitrary number of output "files", which are
specified by a plain output url. Anything found on the command line which
cannot be interpreted as an option is considered to be an output url.
@code{-i} option, and writes to an arbitrary number of outputs, which are
specified by a plain output url. Anything found on the command line which cannot
be interpreted as an option is considered to be an output url.

Each input or output url can, in principle, contain any number of streams of
different types (video/audio/subtitle/attachment/data). The allowed number and/or
types of streams may be limited by the container format. Selecting which
streams from which inputs will go into which output is either done automatically
or with the @code{-map} option (see the Stream selection chapter).
Each input or output can, in principle, contain any number of elementary streams
of different types (video/audio/subtitle/attachment/data), though the allowed
stream counts and/or types may be limited by the container format. Selecting
which streams from which inputs will go into which output is either done
automatically or with the @code{-map} option (see the @ref{Stream selection}
chapter).

To refer to input files in options, you must use their indices (0-based). E.g.
the first input file is @code{0}, the second is @code{1}, etc. Similarly, streams
within a file are referred to by their indices. E.g. @code{2:3} refers to the
fourth stream in the third input file. Also see the Stream specifiers chapter.
To refer to inputs/outputs in options, you must use their indices (0-based).
E.g. the first input is @code{0}, the second is @code{1}, etc. Similarly,
streams within an input/output are referred to by their indices. E.g. @code{2:3}
refers to the fourth stream in the third input or output. Also see the
@ref{Stream specifiers} chapter.
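For example (file names here are placeholders), the following selects the first video stream of the first input and the first audio stream of the second input:

@example
ffmpeg -i input0.mp4 -i input1.mkv -map 0:v:0 -map 1:a:0 output.mp4
@end example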

As a general rule, options are applied to the next specified
file. Therefore, order is important, and you can have the same
@ -261,6 +263,7 @@ reads an input video and

@c man end DETAILED DESCRIPTION

@anchor{Stream selection}
@chapter Stream selection
@c man begin STREAM SELECTION
7 submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/ffbuild/.gitignore vendored Normal file
@ -0,0 +1,7 @@
/.config
/bin2c
/bin2c.exe
/config.fate
/config.log
/config.mak
/config.sh
@ -3,6 +3,8 @@ OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
|
||||
OBJS-$(HAVE_ARMV8) += $(ARMV8-OBJS) $(ARMV8-OBJS-yes)
|
||||
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
|
||||
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
|
||||
OBJS-$(HAVE_SVE) += $(SVE-OBJS) $(SVE-OBJS-yes)
|
||||
OBJS-$(HAVE_SVE2) += $(SVE2-OBJS) $(SVE2-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes)
|
||||
|
@ -18,7 +18,7 @@ BIN2C = $(BIN2CEXE)
|
||||
ifndef V
|
||||
Q = @
|
||||
ECHO = printf "$(1)\t%s\n" $(2)
|
||||
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C
|
||||
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C METALCC METALLIB
|
||||
SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM
|
||||
|
||||
MSG = $@
|
||||
|
@ -728,7 +728,7 @@ static void print_stream_maps(void)
|
||||
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file->index,
|
||||
ost->index, ost->enc_ctx->codec->name);
|
||||
ost->index, ost->enc->enc_ctx->codec->name);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -737,9 +737,9 @@ static void print_stream_maps(void)
|
||||
ost->ist->index,
|
||||
ost->file->index,
|
||||
ost->index);
|
||||
if (ost->enc_ctx) {
|
||||
if (ost->enc) {
|
||||
const AVCodec *in_codec = ost->ist->dec;
|
||||
const AVCodec *out_codec = ost->enc_ctx->codec;
|
||||
const AVCodec *out_codec = ost->enc->enc_ctx->codec;
|
||||
const char *decoder_name = "?";
|
||||
const char *in_codec_name = "?";
|
||||
const char *encoder_name = "?";
|
||||
|
@ -328,6 +328,8 @@ typedef struct OutputFilterOptions {
|
||||
enum AVColorRange color_range;
|
||||
|
||||
enum VideoSyncMethod vsync_method;
|
||||
AVRational frame_rate;
|
||||
AVRational max_frame_rate;
|
||||
|
||||
int sample_rate;
|
||||
AVChannelLayout ch_layout;
|
||||
@ -461,14 +463,6 @@ typedef struct InputStream {
|
||||
* currently video and audio only */
|
||||
InputFilter **filters;
|
||||
int nb_filters;
|
||||
|
||||
/*
|
||||
* Output targets that do not go through lavfi, i.e. subtitles or
|
||||
* streamcopy. Those two cases are distinguished by the OutputStream
|
||||
* having an encoder or not.
|
||||
*/
|
||||
struct OutputStream **outputs;
|
||||
int nb_outputs;
|
||||
} InputStream;
|
||||
|
||||
typedef struct InputFile {
|
||||
@ -545,13 +539,6 @@ typedef struct EncStats {
|
||||
int lock_initialized;
|
||||
} EncStats;
|
||||
|
||||
extern const char *const forced_keyframes_const_names[];
|
||||
|
||||
typedef enum {
|
||||
ENCODER_FINISHED = 1,
|
||||
MUXER_FINISHED = 2,
|
||||
} OSTFinished ;
|
||||
|
||||
enum {
|
||||
KF_FORCE_SOURCE = 1,
|
||||
#if FFMPEG_OPT_FORCE_KF_SOURCE_NO_DROP
|
||||
@ -575,7 +562,15 @@ typedef struct KeyframeForceCtx {
|
||||
int dropped_keyframe;
|
||||
} KeyframeForceCtx;
|
||||
|
||||
typedef struct Encoder Encoder;
|
||||
typedef struct Encoder {
|
||||
const AVClass *class;
|
||||
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
// number of frames/samples sent to the encoder
|
||||
uint64_t frames_encoded;
|
||||
uint64_t samples_encoded;
|
||||
} Encoder;
|
||||
|
||||
enum CroppingType {
|
||||
CROP_DISABLED = 0,
|
||||
@ -594,12 +589,6 @@ typedef struct OutputStream {
|
||||
|
||||
int index; /* stream index in the output file */
|
||||
|
||||
/**
|
||||
* Codec parameters for packets submitted to the muxer (i.e. before
|
||||
* bitstream filtering, if any).
|
||||
*/
|
||||
AVCodecParameters *par_in;
|
||||
|
||||
/* input stream that is the source for this output stream;
|
||||
* may be NULL for streams with no well-defined source, e.g.
|
||||
* attachments or outputs from complex filtergraphs */
|
||||
@ -608,12 +597,8 @@ typedef struct OutputStream {
|
||||
AVStream *st; /* stream in the output file */
|
||||
|
||||
Encoder *enc;
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
/* video only */
|
||||
AVRational frame_rate;
|
||||
AVRational max_frame_rate;
|
||||
int force_fps;
|
||||
#if FFMPEG_OPT_TOP
|
||||
int top_field_first;
|
||||
#endif
|
||||
@ -636,9 +621,6 @@ typedef struct OutputStream {
|
||||
/* stats */
|
||||
// number of packets send to the muxer
|
||||
atomic_uint_least64_t packets_written;
|
||||
// number of frames/samples sent to the encoder
|
||||
uint64_t frames_encoded;
|
||||
uint64_t samples_encoded;
|
||||
|
||||
/* packet quality factor */
|
||||
atomic_int quality;
|
||||
@ -762,10 +744,11 @@ int find_codec(void *logctx, const char *name,
|
||||
int parse_and_set_vsync(const char *arg, int *vsync_var, int file_idx, int st_idx, int is_global);
|
||||
|
||||
int filtergraph_is_simple(const FilterGraph *fg);
|
||||
int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
|
||||
char *graph_desc,
|
||||
Scheduler *sch, unsigned sch_idx_enc,
|
||||
const OutputFilterOptions *opts);
|
||||
int fg_create_simple(FilterGraph **pfg,
|
||||
InputStream *ist,
|
||||
char *graph_desc,
|
||||
Scheduler *sch, unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts);
|
||||
int fg_finalise_bindings(void);
|
||||
|
||||
/**
|
||||
@ -779,7 +762,7 @@ const FrameData *frame_data_c(AVFrame *frame);
|
||||
FrameData *packet_data (AVPacket *pkt);
|
||||
const FrameData *packet_data_c(AVPacket *pkt);
|
||||
|
||||
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost,
|
||||
int ofilter_bind_enc(OutputFilter *ofilter,
|
||||
unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts);
|
||||
|
||||
@ -858,7 +841,7 @@ int dec_request_view(Decoder *dec, const ViewSpecifier *vs,
|
||||
SchedulerNode *src);
|
||||
|
||||
int enc_alloc(Encoder **penc, const AVCodec *codec,
|
||||
Scheduler *sch, unsigned sch_idx);
|
||||
Scheduler *sch, unsigned sch_idx, void *log_parent);
|
||||
void enc_free(Encoder **penc);
|
||||
|
||||
int enc_open(void *opaque, const AVFrame *frame);
|
||||
@ -871,7 +854,8 @@ int enc_loopback(Encoder *enc);
|
||||
*
|
||||
* Open the muxer once all the streams have been initialized.
|
||||
*/
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost);
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost,
|
||||
const AVCodecContext *enc_ctx);
|
||||
int of_write_trailer(OutputFile *of);
|
||||
int of_open(const OptionsContext *o, const char *filename, Scheduler *sch);
|
||||
void of_free(OutputFile **pof);
|
||||
@ -883,7 +867,8 @@ int64_t of_filesize(OutputFile *of);
|
||||
int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch);
|
||||
void ifile_close(InputFile **f);
|
||||
|
||||
int ist_output_add(InputStream *ist, OutputStream *ost);
|
||||
int ist_use(InputStream *ist, int decoding_needed,
|
||||
const ViewSpecifier *vs, SchedulerNode *src);
|
||||
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
|
||||
const ViewSpecifier *vs, InputFilterOptions *opts,
|
||||
SchedulerNode *src);
|
||||
|
@ -840,7 +840,6 @@ static void ist_free(InputStream **pist)
|
||||
|
||||
av_dict_free(&ds->decoder_opts);
|
||||
av_freep(&ist->filters);
|
||||
av_freep(&ist->outputs);
|
||||
av_freep(&ds->dec_opts.hwaccel_device);
|
||||
|
||||
avcodec_parameters_free(&ist->par);
|
||||
@ -874,8 +873,8 @@ void ifile_close(InputFile **pf)
|
||||
av_freep(pf);
|
||||
}
|
||||
|
||||
static int ist_use(InputStream *ist, int decoding_needed,
|
||||
const ViewSpecifier *vs, SchedulerNode *src)
|
||||
int ist_use(InputStream *ist, int decoding_needed,
|
||||
const ViewSpecifier *vs, SchedulerNode *src)
|
||||
{
|
||||
Demuxer *d = demuxer_from_ifile(ist->file);
|
||||
DemuxStream *ds = ds_from_ist(ist);
|
||||
@ -975,25 +974,6 @@ static int ist_use(InputStream *ist, int decoding_needed,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ist_output_add(InputStream *ist, OutputStream *ost)
|
||||
{
|
||||
DemuxStream *ds = ds_from_ist(ist);
|
||||
SchedulerNode src;
|
||||
int ret;
|
||||
|
||||
ret = ist_use(ist, ost->enc ? DECODING_FOR_OST : 0, NULL, &src);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = GROW_ARRAY(ist->outputs, ist->nb_outputs);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ist->outputs[ist->nb_outputs - 1] = ost;
|
||||
|
||||
return ost->enc ? ds->sch_idx_dec : ds->sch_idx_stream;
|
||||
}
|
||||
|
||||
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
|
||||
const ViewSpecifier *vs, InputFilterOptions *opts,
|
||||
SchedulerNode *src)
|
||||
|
@ -38,7 +38,12 @@
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
struct Encoder {
|
||||
typedef struct EncoderPriv {
|
||||
Encoder e;
|
||||
|
||||
void *log_parent;
|
||||
char log_name[32];
|
||||
|
||||
// combined size of all the packets received from the encoder
|
||||
uint64_t data_size;
|
||||
|
||||
@ -50,7 +55,12 @@ struct Encoder {
|
||||
|
||||
Scheduler *sch;
|
||||
unsigned sch_idx;
|
||||
};
|
||||
} EncoderPriv;
|
||||
|
||||
static EncoderPriv *ep_from_enc(Encoder *enc)
|
||||
{
|
||||
return (EncoderPriv*)enc;
|
||||
}
|
||||
|
||||
// data that is local to the decoder thread and not visible outside of it
|
||||
typedef struct EncoderThread {
|
||||
@ -65,56 +75,90 @@ void enc_free(Encoder **penc)
|
||||
if (!enc)
|
||||
return;
|
||||
|
||||
if (enc->enc_ctx)
|
||||
av_freep(&enc->enc_ctx->stats_in);
|
||||
avcodec_free_context(&enc->enc_ctx);
|
||||
|
||||
av_freep(penc);
|
||||
}
|
||||
|
||||
int enc_alloc(Encoder **penc, const AVCodec *codec,
|
||||
Scheduler *sch, unsigned sch_idx)
|
||||
static const char *enc_item_name(void *obj)
|
||||
{
|
||||
Encoder *enc;
|
||||
const EncoderPriv *ep = obj;
|
||||
|
||||
return ep->log_name;
|
||||
}
|
||||
|
||||
static const AVClass enc_class = {
|
||||
.class_name = "Encoder",
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
.parent_log_context_offset = offsetof(EncoderPriv, log_parent),
|
||||
.item_name = enc_item_name,
|
||||
};
|
||||
|
||||
int enc_alloc(Encoder **penc, const AVCodec *codec,
|
||||
Scheduler *sch, unsigned sch_idx, void *log_parent)
|
||||
{
|
||||
EncoderPriv *ep;
|
||||
int ret = 0;
|
||||
|
||||
*penc = NULL;
|
||||
|
||||
enc = av_mallocz(sizeof(*enc));
|
||||
if (!enc)
|
||||
ep = av_mallocz(sizeof(*ep));
|
||||
if (!ep)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
enc->sch = sch;
|
||||
enc->sch_idx = sch_idx;
|
||||
ep->e.class = &enc_class;
|
||||
ep->log_parent = log_parent;
|
||||
|
||||
*penc = enc;
|
||||
ep->sch = sch;
|
||||
ep->sch_idx = sch_idx;
|
||||
|
||||
snprintf(ep->log_name, sizeof(ep->log_name), "enc:%s", codec->name);
|
||||
|
||||
ep->e.enc_ctx = avcodec_alloc_context3(codec);
|
||||
if (!ep->e.enc_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
*penc = &ep->e;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
enc_free((Encoder**)&ep);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hw_device_setup_for_encode(OutputStream *ost, AVBufferRef *frames_ref)
|
||||
static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx,
|
||||
AVBufferRef *frames_ref)
|
||||
{
|
||||
const AVCodecHWConfig *config;
|
||||
HWDevice *dev = NULL;
|
||||
|
||||
if (frames_ref &&
|
||||
((AVHWFramesContext*)frames_ref->data)->format ==
|
||||
ost->enc_ctx->pix_fmt) {
|
||||
enc_ctx->pix_fmt) {
|
||||
// Matching format, will try to use hw_frames_ctx.
|
||||
} else {
|
||||
frames_ref = NULL;
|
||||
}
|
||||
|
||||
for (int i = 0;; i++) {
|
||||
config = avcodec_get_hw_config(ost->enc_ctx->codec, i);
|
||||
config = avcodec_get_hw_config(enc_ctx->codec, i);
|
||||
if (!config)
|
||||
break;
|
||||
|
||||
if (frames_ref &&
|
||||
config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
|
||||
(config->pix_fmt == AV_PIX_FMT_NONE ||
|
||||
config->pix_fmt == ost->enc_ctx->pix_fmt)) {
|
||||
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input "
|
||||
config->pix_fmt == enc_ctx->pix_fmt)) {
|
||||
av_log(e, AV_LOG_VERBOSE, "Using input "
|
||||
"frames context (format %s) with %s encoder.\n",
|
||||
av_get_pix_fmt_name(ost->enc_ctx->pix_fmt),
|
||||
ost->enc_ctx->codec->name);
|
||||
ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
|
||||
if (!ost->enc_ctx->hw_frames_ctx)
|
||||
av_get_pix_fmt_name(enc_ctx->pix_fmt),
|
||||
enc_ctx->codec->name);
|
||||
enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
|
||||
if (!enc_ctx->hw_frames_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
@ -125,11 +169,11 @@ static int hw_device_setup_for_encode(OutputStream *ost, AVBufferRef *frames_ref
|
||||
}
|
||||
|
||||
if (dev) {
|
||||
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s "
|
||||
av_log(e, AV_LOG_VERBOSE, "Using device %s "
|
||||
"(type %s) with %s encoder.\n", dev->name,
|
||||
av_hwdevice_get_type_name(dev->type), ost->enc_ctx->codec->name);
|
||||
ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
|
||||
if (!ost->enc_ctx->hw_device_ctx)
|
||||
av_hwdevice_get_type_name(dev->type), enc_ctx->codec->name);
|
||||
enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
|
||||
if (!enc_ctx->hw_device_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
} else {
|
||||
// No device required, or no device available.
|
||||
@ -137,37 +181,13 @@ static int hw_device_setup_for_encode(OutputStream *ost, AVBufferRef *frames_ref
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_encoder_id(OutputFile *of, OutputStream *ost)
|
||||
{
|
||||
const char *cname = ost->enc_ctx->codec->name;
|
||||
uint8_t *encoder_string;
|
||||
int encoder_string_len;
|
||||
|
||||
if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
|
||||
return 0;
|
||||
|
||||
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
|
||||
encoder_string = av_mallocz(encoder_string_len);
|
||||
if (!encoder_string)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (!of->bitexact && !ost->bitexact)
|
||||
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
|
||||
else
|
||||
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
|
||||
av_strlcat(encoder_string, cname, encoder_string_len);
|
||||
av_dict_set(&ost->st->metadata, "encoder", encoder_string,
|
||||
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int enc_open(void *opaque, const AVFrame *frame)
|
||||
{
|
||||
OutputStream *ost = opaque;
|
||||
InputStream *ist = ost->ist;
|
||||
Encoder *e = ost->enc;
|
||||
AVCodecContext *enc_ctx = ost->enc_ctx;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
AVCodecContext *enc_ctx = e->enc_ctx;
|
||||
Decoder *dec = NULL;
|
||||
const AVCodec *enc = enc_ctx->codec;
|
||||
OutputFile *of = ost->file;
|
||||
@ -175,7 +195,7 @@ int enc_open(void *opaque, const AVFrame *frame)
|
||||
int frame_samples = 0;
|
||||
int ret;
|
||||
|
||||
if (e->opened)
|
||||
if (ep->opened)
|
||||
return 0;
|
||||
|
||||
// frame is always non-NULL for audio and video
|
||||
@ -200,10 +220,6 @@ int enc_open(void *opaque, const AVFrame *frame)
|
||||
}
|
||||
}
|
||||
|
||||
ret = set_encoder_id(of, ost);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ist)
|
||||
dec = ist->decoder;
|
||||
|
||||
@ -211,7 +227,6 @@ int enc_open(void *opaque, const AVFrame *frame)
|
||||
if (ost->type == AVMEDIA_TYPE_AUDIO || ost->type == AVMEDIA_TYPE_VIDEO) {
|
||||
enc_ctx->time_base = frame->time_base;
|
||||
enc_ctx->framerate = fd->frame_rate_filter;
|
||||
ost->st->avg_frame_rate = fd->frame_rate_filter;
|
||||
}
|
||||
|
||||
switch (enc_ctx->codec_type) {
|
||||
@ -238,7 +253,7 @@ int enc_open(void *opaque, const AVFrame *frame)
|
||||
frame->height > 0);
|
||||
enc_ctx->width = frame->width;
|
||||
enc_ctx->height = frame->height;
|
||||
enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
|
||||
enc_ctx->sample_aspect_ratio =
|
||||
ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
|
||||
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
|
||||
frame->sample_aspect_ratio;
|
||||
@ -312,42 +327,31 @@ int enc_open(void *opaque, const AVFrame *frame)
|
||||
|
||||
enc_ctx->flags |= AV_CODEC_FLAG_FRAME_DURATION;
|
||||
|
||||
ret = hw_device_setup_for_encode(ost, frame ? frame->hw_frames_ctx : NULL);
|
||||
ret = hw_device_setup_for_encode(e, enc_ctx, frame ? frame->hw_frames_ctx : NULL);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
av_log(e, AV_LOG_ERROR,
|
||||
"Encoding hardware device setup failed: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_open2(ost->enc_ctx, enc, NULL)) < 0) {
|
||||
if ((ret = avcodec_open2(enc_ctx, enc, NULL)) < 0) {
|
||||
if (ret != AVERROR_EXPERIMENTAL)
|
||||
av_log(ost, AV_LOG_ERROR, "Error while opening encoder - maybe "
|
||||
av_log(e, AV_LOG_ERROR, "Error while opening encoder - maybe "
|
||||
"incorrect parameters such as bit_rate, rate, width or height.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
e->opened = 1;
|
||||
ep->opened = 1;
|
||||
|
||||
if (ost->enc_ctx->frame_size)
|
||||
frame_samples = ost->enc_ctx->frame_size;
|
||||
if (enc_ctx->frame_size)
|
||||
frame_samples = enc_ctx->frame_size;
|
||||
|
||||
if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
|
||||
ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
|
||||
av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
if (enc_ctx->bit_rate && enc_ctx->bit_rate < 1000 &&
|
||||
enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
|
||||
av_log(e, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
" It takes bits/s as argument, not kbits/s\n");
|
||||
|
||||
ret = avcodec_parameters_from_context(ost->par_in, ost->enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_FATAL,
|
||||
"Error initializing the output stream codec context.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
// copy timebase while removing common factors
|
||||
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
|
||||
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
|
||||
|
||||
ret = of_stream_init(of, ost);
|
||||
ret = of_stream_init(of, ost, enc_ctx);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -369,19 +373,20 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
|
||||
AVPacket *pkt)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
int subtitle_out_max_size = 1024 * 1024;
|
||||
int subtitle_out_size, nb, i, ret;
|
||||
AVCodecContext *enc;
|
||||
int64_t pts;
|
||||
|
||||
if (sub->pts == AV_NOPTS_VALUE) {
|
||||
av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
|
||||
av_log(e, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
|
||||
return exit_on_error ? AVERROR(EINVAL) : 0;
|
||||
}
|
||||
if ((of->start_time != AV_NOPTS_VALUE && sub->pts < of->start_time))
|
||||
return 0;
|
||||
|
||||
enc = ost->enc_ctx;
|
||||
enc = e->enc_ctx;
|
||||
|
||||
/* Note: DVB subtitle need one packet to draw them and one other
|
||||
packet to clear them */
|
||||
@ -420,11 +425,11 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
|
||||
local_sub.rects += i;
|
||||
}
|
||||
|
||||
ost->frames_encoded++;
|
||||
e->frames_encoded++;
|
||||
|
||||
subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, &local_sub);
|
||||
if (subtitle_out_size < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n");
|
||||
av_log(e, AV_LOG_FATAL, "Subtitle encoding failed\n");
|
||||
return subtitle_out_size;
|
||||
}
|
||||
|
||||
@ -442,7 +447,7 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
|
||||
}
|
||||
pkt->dts = pkt->pts;
|
||||
|
||||
ret = sch_enc_send(e->sch, e->sch_idx, pkt);
|
||||
ret = sch_enc_send(ep->sch, ep->sch_idx, pkt);
|
||||
if (ret < 0) {
|
||||
av_packet_unref(pkt);
|
||||
return ret;
|
||||
@ -457,6 +462,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
|
||||
uint64_t frame_num)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
AVIOContext *io = es->io;
|
||||
AVRational tb = frame ? frame->time_base : pkt->time_base;
|
||||
int64_t pts = frame ? frame->pts : pkt->pts;
|
||||
@ -494,7 +500,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
|
||||
|
||||
if (frame) {
|
||||
switch (c->type) {
|
||||
case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, ost->samples_encoded); continue;
|
||||
case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, e->samples_encoded); continue;
|
||||
case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue;
|
||||
default: av_assert0(0);
|
||||
}
|
||||
@ -512,7 +518,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
|
||||
}
|
||||
case ENC_STATS_AVG_BITRATE: {
|
||||
double duration = pkt->dts * av_q2d(tb);
|
||||
avio_printf(io, "%g", duration > 0 ? 8.0 * e->data_size / duration : -1.);
|
||||
avio_printf(io, "%g", duration > 0 ? 8.0 * ep->data_size / duration : -1.);
|
||||
continue;
|
||||
}
|
||||
default: av_assert0(0);
|
||||
@ -533,9 +539,10 @@ static inline double psnr(double d)
|
||||
static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
|
||||
NULL);
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
AVCodecContext *enc = e->enc_ctx;
|
||||
enum AVPictureType pict_type;
|
||||
int64_t frame_number;
|
||||
double ti1, bitrate, avg_bitrate;
|
||||
@ -566,7 +573,7 @@ static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_
|
||||
}
|
||||
}
|
||||
|
||||
frame_number = e->packets_encoded;
|
||||
frame_number = ep->packets_encoded;
|
||||
if (vstats_version <= 1) {
|
||||
fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
|
||||
quality / (float)FF_QP2LAMBDA);
|
||||
@ -586,9 +593,9 @@ static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_
|
||||
ti1 = 0.01;
|
||||
|
||||
bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
|
||||
avg_bitrate = (double)(e->data_size * 8) / ti1 / 1000.0;
|
||||
avg_bitrate = (double)(ep->data_size * 8) / ti1 / 1000.0;
|
||||
fprintf(vstats_file, "s_size= %8.0fKiB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
|
||||
(double)e->data_size / 1024, ti1, bitrate, avg_bitrate);
|
||||
(double)ep->data_size / 1024, ti1, bitrate, avg_bitrate);
|
||||
fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(pict_type));
|
||||
|
||||
return 0;
|
||||
@ -598,7 +605,8 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
AVPacket *pkt)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
AVCodecContext *enc = e->enc_ctx;
|
||||
const char *type_desc = av_get_media_type_string(enc->codec_type);
|
||||
const char *action = frame ? "encode" : "flush";
|
||||
int ret;
|
||||
@ -613,13 +621,13 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
|
||||
if (ost->enc_stats_pre.io)
|
||||
enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL,
|
||||
ost->frames_encoded);
|
||||
e->frames_encoded);
|
||||
|
||||
ost->frames_encoded++;
|
||||
ost->samples_encoded += frame->nb_samples;
|
||||
e->frames_encoded++;
|
||||
e->samples_encoded += frame->nb_samples;
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(ost, AV_LOG_INFO, "encoder <- type:%s "
|
||||
av_log(e, AV_LOG_INFO, "encoder <- type:%s "
|
||||
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
|
||||
type_desc,
|
||||
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
|
||||
@ -634,7 +642,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
|
||||
ret = avcodec_send_frame(enc, frame);
|
||||
if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
|
||||
av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
|
||||
av_log(e, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
|
||||
type_desc);
|
||||
return ret;
|
||||
}
|
||||
@ -659,7 +667,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
return 0;
|
||||
} else if (ret < 0) {
|
||||
if (ret != AVERROR_EOF)
|
||||
av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
|
||||
av_log(e, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -670,7 +678,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
|
||||
// attach stream parameters to first packet if requested
|
||||
avcodec_parameters_free(&fd->par_enc);
|
||||
if (e->attach_par && !e->packets_encoded) {
|
||||
if (ep->attach_par && !ep->packets_encoded) {
|
||||
fd->par_enc = avcodec_parameters_alloc();
|
||||
if (!fd->par_enc)
|
||||
return AVERROR(ENOMEM);
|
||||
@ -690,10 +698,10 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
|
||||
if (ost->enc_stats_post.io)
|
||||
enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt,
|
||||
e->packets_encoded);
|
||||
ep->packets_encoded);
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
|
||||
av_log(e, AV_LOG_INFO, "encoder -> type:%s "
|
||||
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
|
||||
"duration:%s duration_time:%s\n",
|
||||
type_desc,
|
||||
@ -702,11 +710,11 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
|
||||
}
|
||||
|
||||
e->data_size += pkt->size;
|
||||
ep->data_size += pkt->size;
|
||||
|
||||
e->packets_encoded++;
|
||||
ep->packets_encoded++;
|
||||
|
||||
ret = sch_enc_send(e->sch, e->sch_idx, pkt);
|
||||
ret = sch_enc_send(ep->sch, ep->sch_idx, pkt);
|
||||
if (ret < 0) {
|
||||
av_packet_unref(pkt);
|
||||
return ret;
|
||||
@ -764,6 +772,7 @@ force_keyframe:
|
||||
|
||||
static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
OutputFile *of = ost->file;
|
||||
enum AVMediaType type = ost->type;
|
||||
|
||||
@ -781,8 +790,8 @@ static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
|
||||
return AVERROR_EOF;
|
||||
|
||||
if (type == AVMEDIA_TYPE_VIDEO) {
|
||||
frame->quality = ost->enc_ctx->global_quality;
|
||||
frame->pict_type = forced_kf_apply(ost, &ost->kf, frame);
|
||||
frame->quality = e->enc_ctx->global_quality;
|
||||
frame->pict_type = forced_kf_apply(e, &ost->kf, frame);
|
||||
|
||||
#if FFMPEG_OPT_TOP
|
||||
if (ost->top_field_first >= 0) {
|
||||
@ -791,9 +800,9 @@ static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
if (!(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
|
||||
ost->enc_ctx->ch_layout.nb_channels != frame->ch_layout.nb_channels) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
if (!(e->enc_ctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
|
||||
e->enc_ctx->ch_layout.nb_channels != frame->ch_layout.nb_channels) {
|
||||
av_log(e, AV_LOG_ERROR,
|
||||
"Audio channel count changed and encoder does not support parameter changes\n");
|
||||
return 0;
|
||||
}
|
||||
@ -807,7 +816,7 @@ static void enc_thread_set_name(const OutputStream *ost)
|
||||
{
|
||||
char name[16];
|
||||
snprintf(name, sizeof(name), "enc%d:%d:%s", ost->file->index, ost->index,
|
||||
ost->enc_ctx->codec->name);
|
||||
ost->enc->enc_ctx->codec->name);
|
||||
ff_thread_setname(name);
|
||||
}
|
||||
|
||||
@ -842,6 +851,7 @@ int encoder_thread(void *arg)
|
||||
{
|
||||
OutputStream *ost = arg;
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
EncoderThread et;
|
||||
int ret = 0, input_status = 0;
|
||||
int name_set = 0;
|
||||
@ -864,17 +874,17 @@ int encoder_thread(void *arg)
|
||||
}
|
||||
|
||||
while (!input_status) {
|
||||
input_status = sch_enc_receive(e->sch, e->sch_idx, et.frame);
|
||||
input_status = sch_enc_receive(ep->sch, ep->sch_idx, et.frame);
|
||||
if (input_status < 0) {
|
||||
if (input_status == AVERROR_EOF) {
|
||||
av_log(ost, AV_LOG_VERBOSE, "Encoder thread received EOF\n");
|
||||
if (e->opened)
|
||||
av_log(e, AV_LOG_VERBOSE, "Encoder thread received EOF\n");
|
||||
if (ep->opened)
|
||||
break;
|
||||
|
||||
av_log(ost, AV_LOG_ERROR, "Could not open encoder before EOF\n");
|
||||
av_log(e, AV_LOG_ERROR, "Could not open encoder before EOF\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
av_log(ost, AV_LOG_ERROR, "Error receiving a frame for encoding: %s\n",
|
||||
av_log(e, AV_LOG_ERROR, "Error receiving a frame for encoding: %s\n",
|
||||
av_err2str(ret));
|
||||
ret = input_status;
|
||||
}
|
||||
@ -893,9 +903,9 @@ int encoder_thread(void *arg)
|
||||
|
||||
if (ret < 0) {
|
||||
if (ret == AVERROR_EOF)
|
||||
av_log(ost, AV_LOG_VERBOSE, "Encoder returned EOF, finishing\n");
|
||||
av_log(e, AV_LOG_VERBOSE, "Encoder returned EOF, finishing\n");
|
||||
else
|
||||
av_log(ost, AV_LOG_ERROR, "Error encoding a frame: %s\n",
|
||||
av_log(e, AV_LOG_ERROR, "Error encoding a frame: %s\n",
|
||||
av_err2str(ret));
|
||||
break;
|
||||
}
|
||||
@ -905,7 +915,7 @@ int encoder_thread(void *arg)
|
||||
if (ret == 0 || ret == AVERROR_EOF) {
|
||||
ret = frame_encode(ost, NULL, et.pkt);
|
||||
if (ret < 0 && ret != AVERROR_EOF)
|
||||
av_log(ost, AV_LOG_ERROR, "Error flushing encoder: %s\n",
|
||||
av_log(e, AV_LOG_ERROR, "Error flushing encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
}
|
||||
|
||||
@ -921,6 +931,7 @@ finish:
|
||||
|
||||
int enc_loopback(Encoder *enc)
|
||||
{
|
||||
enc->attach_par = 1;
|
||||
return enc->sch_idx;
|
||||
EncoderPriv *ep = ep_from_enc(enc);
|
||||
ep->attach_par = 1;
|
||||
return ep->sch_idx;
|
||||
}
|
||||
|
@ -783,8 +783,7 @@ static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layout
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost,
|
||||
unsigned sched_idx_enc,
|
||||
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts)
|
||||
{
|
||||
OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
|
||||
@ -793,7 +792,8 @@ int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost,
|
||||
int ret;
|
||||
|
||||
av_assert0(!ofilter->bound);
|
||||
av_assert0(ofilter->type == ost->type);
|
||||
av_assert0(!opts->enc ||
|
||||
ofilter->type == opts->enc->type);
|
||||
|
||||
ofilter->bound = 1;
|
||||
av_freep(&ofilter->linklabel);
|
||||
@ -854,10 +854,9 @@ int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost,
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ofp->fps.vsync_method = opts->vsync_method;
|
||||
ofp->fps.framerate = ost->frame_rate;
|
||||
ofp->fps.framerate_max = ost->max_frame_rate;
|
||||
ofp->fps.framerate_supported = ost->force_fps || !opts->enc ?
|
||||
NULL : opts->frame_rates;
|
||||
ofp->fps.framerate = opts->frame_rate;
|
||||
ofp->fps.framerate_max = opts->max_frame_rate;
|
||||
ofp->fps.framerate_supported = opts->frame_rates;
|
||||
|
||||
// reduce frame rate for mpeg4 to be within the spec limits
|
||||
if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
|
||||
@ -1181,25 +1180,27 @@ fail:
|
||||
return 0;
|
||||
}
|
||||
|
||||
int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
|
||||
char *graph_desc,
|
||||
Scheduler *sch, unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts)
|
||||
int fg_create_simple(FilterGraph **pfg,
|
||||
InputStream *ist,
|
||||
char *graph_desc,
|
||||
Scheduler *sch, unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts)
|
||||
{
|
||||
const enum AVMediaType type = ist->par->codec_type;
|
||||
FilterGraph *fg;
|
||||
FilterGraphPriv *fgp;
|
||||
int ret;
|
||||
|
||||
ret = fg_create(&ost->fg_simple, graph_desc, sch);
|
||||
ret = fg_create(pfg, graph_desc, sch);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
fg = ost->fg_simple;
|
||||
fg = *pfg;
|
||||
fgp = fgp_from_fg(fg);
|
||||
|
||||
fgp->is_simple = 1;
|
||||
|
||||
snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
|
||||
av_get_media_type_string(ost->type)[0], opts->name);
|
||||
av_get_media_type_string(type)[0], opts->name);
|
||||
|
||||
if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
|
||||
av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
|
||||
@ -1209,21 +1210,19 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
|
||||
graph_desc, fg->nb_inputs, fg->nb_outputs);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
if (fg->outputs[0]->type != ost->type) {
|
||||
if (fg->outputs[0]->type != type) {
|
||||
av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
|
||||
"it to %s output stream\n",
|
||||
av_get_media_type_string(fg->outputs[0]->type),
|
||||
av_get_media_type_string(ost->type));
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
ost->filter = fg->outputs[0];
|
||||
|
||||
ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc, opts);
|
||||
ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -1351,8 +1350,10 @@ static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
|
||||
} else {
|
||||
ist = ist_find_unused(type);
|
||||
if (!ist) {
|
||||
av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
|
||||
"unlabeled input pad %s\n", ifilter->name);
|
||||
av_log(fg, AV_LOG_FATAL,
|
||||
"Cannot find an unused %s input stream to feed the "
|
||||
"unlabeled input pad %s.\n",
|
||||
av_get_media_type_string(type), ifilter->name);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
@ -1585,14 +1586,18 @@ static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph,
|
||||
int ret;
|
||||
|
||||
snprintf(name, sizeof(name), "out_%s", ofp->name);
|
||||
ret = avfilter_graph_create_filter(&ofp->filter,
|
||||
avfilter_get_by_name("abuffersink"),
|
||||
name, NULL, NULL, graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ofp->filter = avfilter_graph_alloc_filter(graph,
|
||||
avfilter_get_by_name("abuffersink"),
|
||||
name);
|
||||
if (!ofp->filter)
|
||||
return AVERROR(ENOMEM);
|
||||
if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
return ret;
|
||||
|
||||
ret = avfilter_init_dict(ofp->filter, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
|
||||
AVFilterContext *filt_ctx; \
|
||||
\
|
||||
@ -1685,9 +1690,6 @@ static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph,
|
||||
AVFilterContext *last_filter;
|
||||
const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
|
||||
const AVPixFmtDescriptor *desc;
|
||||
AVRational fr = ifp->opts.framerate;
|
||||
AVRational sar;
|
||||
AVBPrint args;
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
|
||||
@ -1697,30 +1699,34 @@ static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph,
|
||||
if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
|
||||
sub2video_prepare(ifp);
|
||||
|
||||
sar = ifp->sample_aspect_ratio;
|
||||
if(!sar.den)
|
||||
sar = (AVRational){0,1};
|
||||
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
av_bprintf(&args,
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
|
||||
"pixel_aspect=%d/%d:colorspace=%d:range=%d",
|
||||
ifp->width, ifp->height, ifp->format,
|
||||
ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
|
||||
ifp->color_space, ifp->color_range);
|
||||
if (fr.num && fr.den)
|
||||
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
|
||||
ifp->opts.name);
|
||||
|
||||
|
||||
if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
|
||||
args.str, NULL, graph)) < 0)
|
||||
ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
|
||||
if (!ifp->filter) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
par->hw_frames_ctx = ifp->hw_frames_ctx;
|
||||
}
|
||||
|
||||
par->format = ifp->format;
|
||||
par->time_base = ifp->time_base;
|
||||
par->frame_rate = ifp->opts.framerate;
|
||||
par->width = ifp->width;
|
||||
par->height = ifp->height;
|
||||
par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
|
||||
ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
|
||||
par->color_space = ifp->color_space;
|
||||
par->color_range = ifp->color_range;
|
||||
par->hw_frames_ctx = ifp->hw_frames_ctx;
|
||||
ret = av_buffersrc_parameters_set(ifp->filter, par);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
av_freep(&par);
|
||||
|
||||
ret = avfilter_init_dict(ifp->filter, NULL);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
last_filter = ifp->filter;
|
||||
|
||||
desc = av_pix_fmt_desc_get(ifp->format);
|
||||
|
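In the hunk above, configure_input_video_filter() now drives the "buffer" source through AVBufferSrcParameters instead of formatting an args string, which also lets it hand over hw_frames_ctx directly. A hedged sketch of the same sequence outside ffmpeg.c (the helper name and the placeholder parameter values are invented):

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h>

/* Configure a video buffer source via AVBufferSrcParameters. */
static int make_video_buffersrc(AVFilterGraph *graph, AVFilterContext **out)
{
    const AVFilter *buffer = avfilter_get_by_name("buffer");
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    AVFilterContext *src;
    int ret;

    if (!par)
        return AVERROR(ENOMEM);

    src = avfilter_graph_alloc_filter(graph, buffer, "src");
    if (!src) {
        av_freep(&par);
        return AVERROR(ENOMEM);
    }

    par->format    = AV_PIX_FMT_YUV420P;        /* placeholder values */
    par->width     = 1280;
    par->height    = 720;
    par->time_base = (AVRational){ 1, 25 };

    ret = av_buffersrc_parameters_set(src, par);
    av_freep(&par);
    if (ret < 0)
        return ret;

    ret = avfilter_init_dict(src, NULL);
    if (ret < 0)
        return ret;

    *out = src;
    return 0;
}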
@ -581,9 +581,9 @@ static int bsf_init(MuxStream *ms)
|
||||
int ret;
|
||||
|
||||
if (!ctx)
|
||||
return avcodec_parameters_copy(ost->st->codecpar, ost->par_in);
|
||||
return avcodec_parameters_copy(ost->st->codecpar, ms->par_in);
|
||||
|
||||
ret = avcodec_parameters_copy(ctx->par_in, ost->par_in);
|
||||
ret = avcodec_parameters_copy(ctx->par_in, ms->par_in);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -608,12 +608,29 @@ static int bsf_init(MuxStream *ms)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost)
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost,
|
||||
const AVCodecContext *enc_ctx)
|
||||
{
|
||||
Muxer *mux = mux_from_of(of);
|
||||
MuxStream *ms = ms_from_ost(ost);
|
||||
int ret;
|
||||
|
||||
if (enc_ctx) {
|
||||
// use upstream time base unless it has been overridden previously
|
||||
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
|
||||
ost->st->time_base = av_add_q(enc_ctx->time_base, (AVRational){0, 1});
|
||||
|
||||
ost->st->avg_frame_rate = enc_ctx->framerate;
|
||||
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio;
|
||||
|
||||
ret = avcodec_parameters_from_context(ms->par_in, enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_FATAL,
|
||||
"Error initializing the output stream codec parameters.\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize bitstream filters for the output stream
|
||||
* needs to be done here, because the codec id for streamcopy is not
|
||||
* known until now */
|
||||
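The new enc_ctx path in of_stream_init() above derives the muxer stream's time base, frame rate, aspect ratio, and codec parameters from the encoder context. Roughly, as a standalone sketch (helper name invented; the real code normalizes the time base via av_add_q() and logs on failure):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Propagate encoder timing and codec parameters to a muxer stream. */
static int stream_params_from_encoder(AVStream *st, const AVCodecContext *enc_ctx)
{
    /* keep an explicit time-base override if one was set earlier */
    if (st->time_base.num <= 0 || st->time_base.den <= 0)
        st->time_base = enc_ctx->time_base;

    st->avg_frame_rate      = enc_ctx->framerate;
    st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio;

    return avcodec_parameters_from_context(st->codecpar, enc_ctx);
}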
@ -644,8 +661,8 @@ static int check_written(OutputFile *of)
|
||||
|
||||
total_packets_written += packets_written;
|
||||
|
||||
if (ost->enc_ctx &&
|
||||
(ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
|
||||
if (ost->enc &&
|
||||
(ost->enc->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
|
||||
!= AV_CODEC_FLAG_PASS1)
|
||||
pass1_used = 0;
|
||||
|
||||
@ -706,9 +723,9 @@ static void mux_final_stats(Muxer *mux)
|
||||
of->index, j, av_get_media_type_string(type));
|
||||
if (ost->enc) {
|
||||
av_log(of, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
|
||||
ost->frames_encoded);
|
||||
ost->enc->frames_encoded);
|
||||
if (type == AVMEDIA_TYPE_AUDIO)
|
||||
av_log(of, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
|
||||
av_log(of, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->enc->samples_encoded);
|
||||
av_log(of, AV_LOG_VERBOSE, "; ");
|
||||
}
|
||||
|
||||
@ -806,7 +823,7 @@ static void ost_free(OutputStream **post)
|
||||
ost->logfile = NULL;
|
||||
}
|
||||
|
||||
avcodec_parameters_free(&ost->par_in);
|
||||
avcodec_parameters_free(&ms->par_in);
|
||||
|
||||
av_bsf_free(&ms->bsf_ctx);
|
||||
av_packet_free(&ms->bsf_pkt);
|
||||
@ -820,10 +837,6 @@ static void ost_free(OutputStream **post)
|
||||
|
||||
av_freep(&ost->attachment_filename);
|
||||
|
||||
if (ost->enc_ctx)
|
||||
av_freep(&ost->enc_ctx->stats_in);
|
||||
avcodec_free_context(&ost->enc_ctx);
|
||||
|
||||
enc_stats_uninit(&ost->enc_stats_pre);
|
||||
enc_stats_uninit(&ost->enc_stats_post);
|
||||
enc_stats_uninit(&ms->stats);
|
||||
|
@ -36,6 +36,12 @@
|
||||
typedef struct MuxStream {
|
||||
OutputStream ost;
|
||||
|
||||
/**
|
||||
* Codec parameters for packets submitted to the muxer (i.e. before
|
||||
* bitstream filtering, if any).
|
||||
*/
|
||||
AVCodecParameters *par_in;
|
||||
|
||||
// name used for logging
|
||||
char log_name[32];
|
||||
|
||||
@ -79,6 +85,10 @@ typedef struct MuxStream {
|
||||
int ts_drop;
|
||||
#endif
|
||||
|
||||
AVRational frame_rate;
|
||||
AVRational max_frame_rate;
|
||||
int force_fps;
|
||||
|
||||
const char *apad;
|
||||
} MuxStream;
|
||||
|
||||
|
@ -67,8 +67,9 @@ static int check_opt_bitexact(void *ctx, const AVDictionary *opts,
|
||||
}
|
||||
|
||||
static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
|
||||
OutputStream *ost, const AVCodec **enc)
|
||||
MuxStream *ms, const AVCodec **enc)
|
||||
{
|
||||
OutputStream *ost = &ms->ost;
|
||||
enum AVMediaType type = ost->type;
|
||||
const char *codec_name = NULL;
|
||||
|
||||
@ -90,20 +91,20 @@ static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
|
||||
}
|
||||
|
||||
if (!codec_name) {
|
||||
ost->par_in->codec_id = av_guess_codec(s->oformat, NULL, s->url, NULL, ost->type);
|
||||
*enc = avcodec_find_encoder(ost->par_in->codec_id);
|
||||
ms->par_in->codec_id = av_guess_codec(s->oformat, NULL, s->url, NULL, ost->type);
|
||||
*enc = avcodec_find_encoder(ms->par_in->codec_id);
|
||||
if (!*enc) {
|
||||
av_log(ost, AV_LOG_FATAL, "Automatic encoder selection failed "
|
||||
"Default encoder for format %s (codec %s) is "
|
||||
"probably disabled. Please choose an encoder manually.\n",
|
||||
s->oformat->name, avcodec_get_name(ost->par_in->codec_id));
|
||||
s->oformat->name, avcodec_get_name(ms->par_in->codec_id));
|
||||
return AVERROR_ENCODER_NOT_FOUND;
|
||||
}
|
||||
} else if (strcmp(codec_name, "copy")) {
|
||||
int ret = find_codec(ost, codec_name, ost->type, 1, enc);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ost->par_in->codec_id = (*enc)->id;
|
||||
ms->par_in->codec_id = (*enc)->id;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -423,27 +424,6 @@ static int ost_get_filters(const OptionsContext *o, AVFormatContext *oc,
|
||||
#endif
|
||||
opt_match_per_stream_str(ost, &o->filters, oc, ost->st, &filters);
|
||||
|
||||
if (!ost->enc) {
|
||||
if (
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters_script ||
|
||||
#endif
|
||||
filters) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"%s '%s' was specified, but codec copy was selected. "
|
||||
"Filtering and streamcopy cannot be used together.\n",
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters ? "Filtergraph" : "Filtergraph script",
|
||||
filters ? filters : filters_script
|
||||
#else
|
||||
"Filtergraph", filters
|
||||
#endif
|
||||
);
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!ost->ist) {
|
||||
if (
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
@ -554,7 +534,7 @@ static enum AVPixelFormat pix_fmt_parse(OutputStream *ost, const char *name)
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
ret = avcodec_get_supported_config(ost->enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
|
||||
ret = avcodec_get_supported_config(ost->enc->enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
|
||||
0, (const void **) &fmts, NULL);
|
||||
if (ret < 0)
|
||||
return AV_PIX_FMT_NONE;
|
||||
@ -586,7 +566,7 @@ static enum AVPixelFormat pix_fmt_parse(OutputStream *ost, const char *name)
|
||||
}
|
||||
|
||||
if (fmts && !fmt_in_list(fmts, fmt))
|
||||
fmt = choose_pixel_fmt(ost->enc_ctx, fmt);
|
||||
fmt = choose_pixel_fmt(ost->enc->enc_ctx, fmt);
|
||||
|
||||
return fmt;
|
||||
}
|
||||
@ -604,13 +584,13 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
||||
st = ost->st;
|
||||
|
||||
opt_match_per_stream_str(ost, &o->frame_rates, oc, st, &frame_rate);
|
||||
if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
|
||||
if (frame_rate && av_parse_video_rate(&ms->frame_rate, frame_rate) < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
opt_match_per_stream_str(ost, &o->max_frame_rates, oc, st, &max_frame_rate);
|
||||
if (max_frame_rate && av_parse_video_rate(&ost->max_frame_rate, max_frame_rate) < 0) {
|
||||
if (max_frame_rate && av_parse_video_rate(&ms->max_frame_rate, max_frame_rate) < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Invalid maximum framerate value: %s\n", max_frame_rate);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
@ -631,8 +611,8 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
||||
ost->frame_aspect_ratio = q;
|
||||
}
|
||||
|
||||
if (ost->enc_ctx) {
|
||||
AVCodecContext *video_enc = ost->enc_ctx;
|
||||
if (ost->enc) {
|
||||
AVCodecContext *video_enc = ost->enc->enc_ctx;
|
||||
const char *p = NULL, *fps_mode = NULL;
|
||||
const char *frame_size = NULL;
|
||||
const char *frame_pix_fmt = NULL;
|
||||
@ -745,10 +725,10 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
||||
ost->logfile_prefix ? ost->logfile_prefix :
|
||||
DEFAULT_PASS_LOGFILENAME_PREFIX,
|
||||
ost_idx);
|
||||
if (!strcmp(ost->enc_ctx->codec->name, "libx264") || !strcmp(ost->enc_ctx->codec->name, "libvvenc")) {
|
||||
if (av_opt_is_set_to_default_by_name(ost->enc_ctx, "stats",
|
||||
if (!strcmp(video_enc->codec->name, "libx264") || !strcmp(video_enc->codec->name, "libvvenc")) {
|
||||
if (av_opt_is_set_to_default_by_name(video_enc, "stats",
|
||||
AV_OPT_SEARCH_CHILDREN) > 0)
|
||||
av_opt_set(ost->enc_ctx, "stats", logfilename,
|
||||
av_opt_set(video_enc, "stats", logfilename,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
} else {
|
||||
if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
|
||||
@ -774,7 +754,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
||||
}
|
||||
}
|
||||
|
||||
opt_match_per_stream_int(ost, &o->force_fps, oc, st, &ost->force_fps);
|
||||
opt_match_per_stream_int(ost, &o->force_fps, oc, st, &ms->force_fps);
|
||||
|
||||
#if FFMPEG_OPT_TOP
|
||||
ost->top_field_first = -1;
|
||||
@ -795,7 +775,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ost->frame_rate.num || ost->max_frame_rate.num) &&
|
||||
if ((ms->frame_rate.num || ms->max_frame_rate.num) &&
|
||||
!(*vsync_method == VSYNC_AUTO ||
|
||||
*vsync_method == VSYNC_CFR || *vsync_method == VSYNC_VSCFR)) {
|
||||
av_log(ost, AV_LOG_FATAL, "One of -r/-fpsmax was specified "
|
||||
@ -804,7 +784,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
||||
}
|
||||
|
||||
if (*vsync_method == VSYNC_AUTO) {
|
||||
if (ost->frame_rate.num || ost->max_frame_rate.num) {
|
||||
if (ms->frame_rate.num || ms->max_frame_rate.num) {
|
||||
*vsync_method = VSYNC_CFR;
|
||||
} else if (!strcmp(oc->oformat->name, "avi")) {
|
||||
*vsync_method = VSYNC_VFR;
|
||||
@ -841,8 +821,8 @@ static int new_stream_audio(Muxer *mux, const OptionsContext *o,
|
||||
AVFormatContext *oc = mux->fc;
|
||||
AVStream *st = ost->st;
|
||||
|
||||
if (ost->enc_ctx) {
|
||||
AVCodecContext *audio_enc = ost->enc_ctx;
|
||||
if (ost->enc) {
|
||||
AVCodecContext *audio_enc = ost->enc->enc_ctx;
|
||||
int channels = 0;
|
||||
const char *layout = NULL;
|
||||
const char *sample_fmt = NULL;
|
||||
@ -880,8 +860,8 @@ static int new_stream_subtitle(Muxer *mux, const OptionsContext *o,
|
||||
|
||||
st = ost->st;
|
||||
|
||||
if (ost->enc_ctx) {
|
||||
AVCodecContext *subtitle_enc = ost->enc_ctx;
|
||||
if (ost->enc) {
|
||||
AVCodecContext *subtitle_enc = ost->enc->enc_ctx;
|
||||
|
||||
AVCodecDescriptor const *input_descriptor =
|
||||
avcodec_descriptor_get(ost->ist->par->codec_id);
|
||||
@ -916,14 +896,16 @@ static int new_stream_subtitle(Muxer *mux, const OptionsContext *o,
|
||||
|
||||
static int
|
||||
ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
||||
const OptionsContext *o, char *filters,
|
||||
const OptionsContext *o,
|
||||
AVRational enc_tb, enum VideoSyncMethod vsync_method,
|
||||
int keep_pix_fmt, int autoscale, int threads_manual,
|
||||
const ViewSpecifier *vs)
|
||||
const ViewSpecifier *vs,
|
||||
SchedulerNode *src)
|
||||
{
|
||||
OutputStream *ost = &ms->ost;
|
||||
AVCodecContext *enc_ctx = ost->enc_ctx;
|
||||
AVCodecContext *enc_ctx = ost->enc->enc_ctx;
|
||||
char name[16];
|
||||
char *filters = NULL;
|
||||
int ret;
|
||||
|
||||
OutputFilterOptions opts = {
|
||||
@ -936,6 +918,8 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
||||
.color_space = enc_ctx->colorspace,
|
||||
.color_range = enc_ctx->color_range,
|
||||
.vsync_method = vsync_method,
|
||||
.frame_rate = ms->frame_rate,
|
||||
.max_frame_rate = ms->max_frame_rate,
|
||||
.sample_rate = enc_ctx->sample_rate,
|
||||
.ch_layout = enc_ctx->ch_layout,
|
||||
.sws_opts = o->g->sws_dict,
|
||||
@ -962,7 +946,7 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
if (!ost->force_fps) {
|
||||
if (!ms->force_fps) {
|
||||
ret = avcodec_get_supported_config(enc_ctx, NULL,
|
||||
AV_CODEC_CONFIG_FRAME_RATE, 0,
|
||||
(const void **) &opts.frame_rates, NULL);
|
||||
@ -1003,41 +987,73 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ost_get_filters(o, mux->fc, ost, &filters);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ofilter) {
|
||||
av_assert0(!filters);
|
||||
ost->filter = ofilter;
|
||||
ret = ofilter_bind_ost(ofilter, ost, ms->sch_idx_enc, &opts);
|
||||
ret = ofilter_bind_enc(ofilter, ms->sch_idx_enc, &opts);
|
||||
} else {
|
||||
ret = init_simple_filtergraph(ost->ist, ost, filters,
|
||||
mux->sch, ms->sch_idx_enc, &opts);
|
||||
ret = fg_create_simple(&ost->fg_simple, ost->ist, filters,
|
||||
mux->sch, ms->sch_idx_enc, &opts);
|
||||
if (ret >= 0)
|
||||
ost->filter = ost->fg_simple->outputs[0];
|
||||
|
||||
}
|
||||
av_freep(&opts.nb_threads);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = sch_connect(mux->sch, SCH_ENC(ms->sch_idx_enc),
|
||||
SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
*src = SCH_ENC(ms->sch_idx_enc);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int streamcopy_init(const Muxer *mux, OutputStream *ost, AVDictionary **encoder_opts)
|
||||
static int streamcopy_init(const OptionsContext *o, const Muxer *mux,
|
||||
OutputStream *ost, AVDictionary **encoder_opts)
|
||||
{
|
||||
MuxStream *ms = ms_from_ost(ost);
|
||||
|
||||
const InputStream *ist = ost->ist;
|
||||
const InputFile *ifile = ist->file;
|
||||
|
||||
AVCodecParameters *par = ost->par_in;
|
||||
AVCodecParameters *par = ms->par_in;
|
||||
uint32_t codec_tag = par->codec_tag;
|
||||
|
||||
AVCodecContext *codec_ctx = NULL;
|
||||
|
||||
AVRational fr = ost->frame_rate;
|
||||
AVRational fr = ms->frame_rate;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
const char *filters = NULL;
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
const char *filters_script = NULL;
|
||||
|
||||
opt_match_per_stream_str(ost, &o->filter_scripts, mux->fc, ost->st, &filters_script);
|
||||
#endif
|
||||
opt_match_per_stream_str(ost, &o->filters, mux->fc, ost->st, &filters);
|
||||
|
||||
if (
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters_script ||
|
||||
#endif
|
||||
filters) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"%s '%s' was specified, but codec copy was selected. "
|
||||
"Filtering and streamcopy cannot be used together.\n",
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters ? "Filtergraph" : "Filtergraph script",
|
||||
filters ? filters : filters_script
|
||||
#else
|
||||
"Filtergraph", filters
|
||||
#endif
|
||||
);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
codec_ctx = avcodec_alloc_context3(NULL);
|
||||
if (!codec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
@ -1139,6 +1155,28 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int set_encoder_id(OutputStream *ost, const AVCodec *codec)
|
||||
{
|
||||
const char *cname = codec->name;
|
||||
uint8_t *encoder_string;
|
||||
int encoder_string_len;
|
||||
|
||||
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
|
||||
encoder_string = av_mallocz(encoder_string_len);
|
||||
if (!encoder_string)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (!ost->file->bitexact && !ost->bitexact)
|
||||
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
|
||||
else
|
||||
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
|
||||
av_strlcat(encoder_string, cname, encoder_string_len);
|
||||
av_dict_set(&ost->st->metadata, "encoder", encoder_string,
|
||||
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
InputStream *ist, OutputFilter *ofilter, const ViewSpecifier *vs,
|
||||
OutputStream **post)
|
||||
@ -1148,13 +1186,14 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
OutputStream *ost;
|
||||
const AVCodec *enc;
|
||||
AVStream *st;
|
||||
SchedulerNode src = { .type = SCH_NODE_TYPE_NONE };
|
||||
AVDictionary *encoder_opts = NULL;
|
||||
int ret = 0, keep_pix_fmt = 0, autoscale = 1;
|
||||
int threads_manual = 0;
|
||||
AVRational enc_tb = { 0, 0 };
|
||||
enum VideoSyncMethod vsync_method = VSYNC_AUTO;
|
||||
const char *bsfs = NULL, *time_base = NULL, *codec_tag = NULL;
|
||||
char *filters = NULL, *next;
|
||||
char *next;
|
||||
double qscale = -1;
|
||||
|
||||
st = avformat_new_stream(oc, NULL);
|
||||
@ -1198,8 +1237,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
}
|
||||
}
|
||||
|
||||
ost->par_in = avcodec_parameters_alloc();
|
||||
if (!ost->par_in)
|
||||
ms->par_in = avcodec_parameters_alloc();
|
||||
if (!ms->par_in)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ms->last_mux_dts = AV_NOPTS_VALUE;
|
||||
@ -1207,27 +1246,23 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
ost->st = st;
|
||||
ost->ist = ist;
|
||||
ost->kf.ref_pts = AV_NOPTS_VALUE;
|
||||
ost->par_in->codec_type = type;
|
||||
ms->par_in->codec_type = type;
|
||||
st->codecpar->codec_type = type;
|
||||
|
||||
ret = choose_encoder(o, oc, ost, &enc);
|
||||
ret = choose_encoder(o, oc, ms, &enc);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Error selecting an encoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (enc) {
|
||||
ost->enc_ctx = avcodec_alloc_context3(enc);
|
||||
if (!ost->enc_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = sch_add_enc(mux->sch, encoder_thread, ost,
|
||||
ost->type == AVMEDIA_TYPE_SUBTITLE ? NULL : enc_open);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ms->sch_idx_enc = ret;
|
||||
|
||||
ret = enc_alloc(&ost->enc, enc, mux->sch, ms->sch_idx_enc);
|
||||
ret = enc_alloc(&ost->enc, enc, mux->sch, ms->sch_idx_enc, ost);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -1262,21 +1297,21 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
if (!ms->pkt)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (ost->enc_ctx) {
|
||||
if (ost->enc) {
|
||||
AVIOContext *s = NULL;
|
||||
char *buf = NULL, *arg = NULL;
|
||||
const char *enc_stats_pre = NULL, *enc_stats_post = NULL, *mux_stats = NULL;
|
||||
const char *enc_time_base = NULL, *preset = NULL;
|
||||
|
||||
ret = filter_codec_opts(o->g->codec_opts, ost->enc_ctx->codec_id,
|
||||
oc, st, ost->enc_ctx->codec, &encoder_opts,
|
||||
ret = filter_codec_opts(o->g->codec_opts, enc->id,
|
||||
oc, st, enc, &encoder_opts,
|
||||
&mux->enc_opts_used);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
opt_match_per_stream_str(ost, &o->presets, oc, st, &preset);
|
||||
opt_match_per_stream_int(ost, &o->autoscale, oc, st, &autoscale);
|
||||
if (preset && (!(ret = get_preset_file_2(preset, ost->enc_ctx->codec->name, &s)))) {
|
||||
if (preset && (!(ret = get_preset_file_2(preset, enc->name, &s)))) {
|
||||
AVBPrint bprint;
|
||||
av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
|
||||
do {
|
||||
@ -1376,7 +1411,7 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
|
||||
threads_manual = !!av_dict_get(encoder_opts, "threads", NULL, 0);
|
||||
|
||||
ret = av_opt_set_dict2(ost->enc_ctx, &encoder_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
ret = av_opt_set_dict2(ost->enc->enc_ctx, &encoder_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_ERROR, "Error applying encoder options: %s\n",
|
||||
av_err2str(ret));
|
||||
@ -1389,7 +1424,7 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
|
||||
// default to automatic thread count
|
||||
if (!threads_manual)
|
||||
ost->enc_ctx->thread_count = 0;
|
||||
ost->enc->enc_ctx->thread_count = 0;
|
||||
} else {
|
||||
ret = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st,
|
||||
NULL, &encoder_opts,
|
||||
@ -1401,8 +1436,14 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
|
||||
if (o->bitexact) {
|
||||
ost->bitexact = 1;
|
||||
} else if (ost->enc_ctx) {
|
||||
ost->bitexact = !!(ost->enc_ctx->flags & AV_CODEC_FLAG_BITEXACT);
|
||||
} else if (ost->enc) {
|
||||
ost->bitexact = !!(ost->enc->enc_ctx->flags & AV_CODEC_FLAG_BITEXACT);
|
||||
}
|
||||
|
||||
if (enc) {
|
||||
ret = set_encoder_id(ost, enc);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
opt_match_per_stream_str(ost, &o->time_bases, oc, st, &time_base);
|
||||
@ -1447,15 +1488,15 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
tag = AV_RL32(buf);
|
||||
}
|
||||
ost->st->codecpar->codec_tag = tag;
|
||||
ost->par_in->codec_tag = tag;
|
||||
if (ost->enc_ctx)
|
||||
ost->enc_ctx->codec_tag = tag;
|
||||
ms->par_in->codec_tag = tag;
|
||||
if (ost->enc)
|
||||
ost->enc->enc_ctx->codec_tag = tag;
|
||||
}
|
||||
|
||||
opt_match_per_stream_dbl(ost, &o->qscale, oc, st, &qscale);
|
||||
if (ost->enc_ctx && qscale >= 0) {
|
||||
ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
|
||||
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
if (ost->enc && qscale >= 0) {
|
||||
ost->enc->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
|
||||
ost->enc->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
}
|
||||
|
||||
if (ms->sch_idx >= 0) {
|
||||
@ -1477,8 +1518,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
opt_match_per_stream_int(ost, &o->fix_sub_duration_heartbeat,
|
||||
oc, st, &ost->fix_sub_duration_heartbeat);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc_ctx)
|
||||
ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc)
|
||||
ost->enc->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
opt_match_per_stream_int(ost, &o->copy_initial_nonkeyframes,
|
||||
oc, st, &ms->copy_initial_nonkeyframes);
|
||||
@ -1490,48 +1531,43 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO) {
|
||||
ret = ost_get_filters(o, oc, ost, &filters);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (ost->enc &&
|
||||
(type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) {
|
||||
ret = ost_bind_filter(mux, ms, ofilter, o, filters, enc_tb, vsync_method,
|
||||
keep_pix_fmt, autoscale, threads_manual, vs);
|
||||
ret = ost_bind_filter(mux, ms, ofilter, o, enc_tb, vsync_method,
|
||||
keep_pix_fmt, autoscale, threads_manual, vs, &src);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
} else if (ost->ist) {
|
||||
int sched_idx = ist_output_add(ost->ist, ost);
|
||||
if (sched_idx < 0) {
|
||||
ret = ist_use(ost->ist, !!ost->enc, NULL, &src);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"Error binding an input stream\n");
|
||||
ret = sched_idx;
|
||||
goto fail;
|
||||
}
|
||||
ms->sch_idx_src = sched_idx;
|
||||
ms->sch_idx_src = src.idx;
|
||||
|
||||
// src refers to a decoder for transcoding, demux stream otherwise
|
||||
if (ost->enc) {
|
||||
ret = sch_connect(mux->sch, SCH_DEC_OUT(sched_idx, 0),
|
||||
SCH_ENC(ms->sch_idx_enc));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
ret = sch_connect(mux->sch, SCH_ENC(ms->sch_idx_enc),
|
||||
SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
} else {
|
||||
ret = sch_connect(mux->sch, SCH_DSTREAM(ost->ist->file->index, sched_idx),
|
||||
SCH_MSTREAM(ost->file->index, ms->sch_idx));
|
||||
ret = sch_connect(mux->sch,
|
||||
src, SCH_ENC(ms->sch_idx_enc));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
src = SCH_ENC(ms->sch_idx_enc);
|
||||
}
|
||||
}
|
||||
|
||||
if (src.type != SCH_NODE_TYPE_NONE) {
|
||||
ret = sch_connect(mux->sch,
|
||||
src, SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
} else {
|
||||
// only attachment streams don't have a source
|
||||
av_assert0(type == AVMEDIA_TYPE_ATTACHMENT && ms->sch_idx < 0);
|
||||
}
|
||||
|
||||
if (ost->ist && !ost->enc) {
|
||||
ret = streamcopy_init(mux, ost, &encoder_opts);
|
||||
ret = streamcopy_init(o, mux, ost, &encoder_opts);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
@ -1799,6 +1835,7 @@ loop_end:
|
||||
|
||||
static int of_add_attachments(Muxer *mux, const OptionsContext *o)
|
||||
{
|
||||
MuxStream *ms;
|
||||
OutputStream *ost;
|
||||
int err;
|
||||
|
||||
@ -1866,9 +1903,11 @@ read_fail:
|
||||
return err;
|
||||
}
|
||||
|
||||
ms = ms_from_ost(ost);
|
||||
|
||||
ost->attachment_filename = attachment_filename;
|
||||
ost->par_in->extradata = attachment;
|
||||
ost->par_in->extradata_size = len;
|
||||
ms->par_in->extradata = attachment;
|
||||
ms->par_in->extradata_size = len;
|
||||
|
||||
p = strrchr(o->attachments[i], '/');
|
||||
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
|
||||
@ -2009,7 +2048,7 @@ static int setup_sync_queues(Muxer *mux, AVFormatContext *oc,
|
||||
int limit_frames = 0, limit_frames_av_enc = 0;
|
||||
|
||||
#define IS_AV_ENC(ost, type) \
|
||||
(ost->enc_ctx && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO))
|
||||
(ost->enc && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO))
|
||||
#define IS_INTERLEAVED(type) (type != AVMEDIA_TYPE_ATTACHMENT)
|
||||
|
||||
for (int i = 0; i < oc->nb_streams; i++) {
|
||||
@ -2021,8 +2060,8 @@ static int setup_sync_queues(Muxer *mux, AVFormatContext *oc,
|
||||
|
||||
nb_interleaved += IS_INTERLEAVED(type);
|
||||
nb_av_enc += IS_AV_ENC(ost, type);
|
||||
nb_audio_fs += (ost->enc_ctx && type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE));
|
||||
nb_audio_fs += (ost->enc && type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE));
|
||||
|
||||
limit_frames |= ms->max_frames < INT64_MAX;
|
||||
limit_frames_av_enc |= (ms->max_frames < INT64_MAX) && IS_AV_ENC(ost, type);
|
||||
@ -2977,9 +3016,6 @@ static int copy_meta(Muxer *mux, const OptionsContext *o)
|
||||
if (!ost->ist) /* this is true e.g. for attached files */
|
||||
continue;
|
||||
av_dict_copy(&ost->st->metadata, ost->ist->st->metadata, AV_DICT_DONT_OVERWRITE);
|
||||
if (ost->enc_ctx) {
|
||||
av_dict_set(&ost->st->metadata, "encoder", NULL, 0);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -3056,7 +3092,7 @@ finish:
|
||||
return ret;
|
||||
}
|
||||
|
||||
const char *const forced_keyframes_const_names[] = {
|
||||
static const char *const forced_keyframes_const_names[] = {
|
||||
"n",
|
||||
"n_forced",
|
||||
"prev_forced_n",
|
||||
@ -3158,7 +3194,7 @@ static int process_forced_keyframes(Muxer *mux, const OptionsContext *o)
|
||||
mux->fc, ost->st, &forced_keyframes);
|
||||
|
||||
if (!(ost->type == AVMEDIA_TYPE_VIDEO &&
|
||||
ost->enc_ctx && forced_keyframes))
|
||||
ost->enc && forced_keyframes))
|
||||
continue;
|
||||
|
||||
if (!strncmp(forced_keyframes, "expr:", 5)) {
|
||||
@ -3385,7 +3421,7 @@ int of_open(const OptionsContext *o, const char *filename, Scheduler *sch)
|
||||
OutputStream *ost = of->streams[i];
|
||||
|
||||
if (!ost->enc) {
|
||||
err = of_stream_init(of, ost);
|
||||
err = of_stream_init(of, ost, NULL);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
@ -388,7 +388,6 @@ static const struct TextureFormatEntry {
|
||||
{ AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
|
||||
{ AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
|
||||
{ AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
|
||||
{ AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
|
||||
};
|
||||
|
||||
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
|
||||
@ -895,7 +894,7 @@ static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_B
|
||||
format == AV_PIX_FMT_BGR32 ||
|
||||
format == AV_PIX_FMT_BGR32_1)
|
||||
*sdl_blendmode = SDL_BLENDMODE_BLEND;
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map); i++) {
|
||||
if (format == sdl_texture_format_map[i].format) {
|
||||
*sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
|
||||
return;
|
||||
@ -941,7 +940,6 @@ static enum AVColorSpace sdl_supported_color_spaces[] = {
|
||||
AVCOL_SPC_BT709,
|
||||
AVCOL_SPC_BT470BG,
|
||||
AVCOL_SPC_SMPTE170M,
|
||||
AVCOL_SPC_UNSPECIFIED,
|
||||
};
|
||||
|
||||
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
|
||||
@ -1861,7 +1859,6 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
{
|
||||
enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
|
||||
char sws_flags_str[512] = "";
|
||||
char buffersrc_args[256];
|
||||
int ret;
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
|
||||
AVCodecParameters *codecpar = is->video_st->codecpar;
|
||||
@ -1875,14 +1872,13 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < renderer_info.num_texture_formats; i++) {
|
||||
for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
|
||||
for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map); j++) {
|
||||
if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
|
||||
pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
|
||||
|
||||
while ((e = av_dict_iterate(sws_dict, e))) {
|
||||
if (!strcmp(e->key, "sws_flags")) {
|
||||
@ -1895,36 +1891,49 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
|
||||
graph->scale_sws_opts = av_strdup(sws_flags_str);
|
||||
|
||||
snprintf(buffersrc_args, sizeof(buffersrc_args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
|
||||
"colorspace=%d:range=%d",
|
||||
frame->width, frame->height, frame->format,
|
||||
is->video_st->time_base.num, is->video_st->time_base.den,
|
||||
codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
|
||||
frame->colorspace, frame->color_range);
|
||||
if (fr.num && fr.den)
|
||||
av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
|
||||
if ((ret = avfilter_graph_create_filter(&filt_src,
|
||||
avfilter_get_by_name("buffer"),
|
||||
"ffplay_buffer", buffersrc_args, NULL,
|
||||
graph)) < 0)
|
||||
filt_src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"),
|
||||
"ffplay_buffer");
|
||||
if (!filt_src) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
par->format = frame->format;
|
||||
par->time_base = is->video_st->time_base;
|
||||
par->width = frame->width;
|
||||
par->height = frame->height;
|
||||
par->sample_aspect_ratio = codecpar->sample_aspect_ratio;
|
||||
par->color_space = frame->colorspace;
|
||||
par->color_range = frame->color_range;
|
||||
par->frame_rate = fr;
|
||||
par->hw_frames_ctx = frame->hw_frames_ctx;
|
||||
ret = av_buffersrc_parameters_set(filt_src, par);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
ret = avfilter_graph_create_filter(&filt_out,
|
||||
avfilter_get_by_name("buffersink"),
|
||||
"ffplay_buffersink", NULL, NULL, graph);
|
||||
ret = avfilter_init_dict(filt_src, NULL);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
filt_out = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffersink"),
|
||||
"ffplay_buffersink");
|
||||
if (!filt_out) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = av_opt_set_array(filt_out, "pixel_formats", AV_OPT_SEARCH_CHILDREN,
|
||||
0, nb_pix_fmts, AV_OPT_TYPE_PIXEL_FMT, pix_fmts)) < 0)
|
||||
goto fail;
|
||||
if (!vk_renderer &&
|
||||
(ret = av_opt_set_int_list(filt_out, "color_spaces", sdl_supported_color_spaces, AVCOL_SPC_UNSPECIFIED, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
(ret = av_opt_set_array(filt_out, "colorspaces", AV_OPT_SEARCH_CHILDREN,
|
||||
0, FF_ARRAY_ELEMS(sdl_supported_color_spaces),
|
||||
AV_OPT_TYPE_INT, sdl_supported_color_spaces)) < 0)
|
||||
goto fail;
|
||||
|
||||
ret = avfilter_init_dict(filt_out, NULL);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
last_filter = filt_out;
|
||||
@ -1994,8 +2003,6 @@ fail:
|
||||
|
||||
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
|
||||
{
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
|
||||
int sample_rates[2] = { 0, -1 };
|
||||
AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
|
||||
char aresample_swr_opts[512] = "";
|
||||
const AVDictionaryEntry *e = NULL;
|
||||
@ -2029,30 +2036,28 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
    if (ret < 0)
        goto end;


    ret = avfilter_graph_create_filter(&filt_asink,
                                       avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
                                       NULL, NULL, is->agraph);
    if (ret < 0)
    filt_asink = avfilter_graph_alloc_filter(is->agraph, avfilter_get_by_name("abuffersink"),
                                             "ffplay_abuffersink");
    if (!filt_asink) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
    if ((ret = av_opt_set(filt_asink, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;

    if (force_output_format) {
        av_bprint_clear(&bp);
        av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
        sample_rates [0] = is->audio_tgt.freq;
        if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
        if ((ret = av_opt_set_array(filt_asink, "channel_layouts", AV_OPT_SEARCH_CHILDREN,
                                    0, 1, AV_OPT_TYPE_CHLAYOUT, &is->audio_tgt.ch_layout)) < 0)
            goto end;
        if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
        if ((ret = av_opt_set_array(filt_asink, "samplerates", AV_OPT_SEARCH_CHILDREN,
                                    0, 1, AV_OPT_TYPE_INT, &is->audio_tgt.freq)) < 0)
            goto end;
    }

    ret = avfilter_init_dict(filt_asink, NULL);
    if (ret < 0)
        goto end;

    if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
        goto end;
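ffplay's sink setup in the hunks above now uses av_opt_set_array() for list-valued options ("pixel_formats", "colorspaces", "channel_layouts", "samplerates") instead of the av_opt_set_int_list() terminator-value convention, and sets them between avfilter_graph_alloc_filter() and avfilter_init_dict(). A small sketch of the call (the helper is hypothetical):

#include <libavfilter/avfilter.h>
#include <libavutil/opt.h>

/* Set a list of candidate sample rates on an abuffersink context that has
 * been allocated but not yet initialized with avfilter_init_dict(). */
static int sink_set_sample_rates(AVFilterContext *sink,
                                 const int *rates, unsigned nb_rates)
{
    return av_opt_set_array(sink, "samplerates", AV_OPT_SEARCH_CHILDREN,
                            0, nb_rates, AV_OPT_TYPE_INT, rates);
}

Unlike the old pattern, the array length is passed explicitly, so no sentinel element (such as -1 or AV_SAMPLE_FMT_NONE) is needed at the end of the list.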
6 submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavcodec/.gitignore vendored Normal file
@ -0,0 +1,6 @@
/*_tablegen
/*_tables.c
/*_tables.h
/bsf_list.c
/codec_list.c
/parser_list.c
@ -1008,6 +1008,7 @@ OBJS-$(CONFIG_AV1_D3D12VA_HWACCEL) += dxva2_av1.o d3d12va_av1.o
|
||||
OBJS-$(CONFIG_AV1_NVDEC_HWACCEL) += nvdec_av1.o
|
||||
OBJS-$(CONFIG_AV1_VAAPI_HWACCEL) += vaapi_av1.o
|
||||
OBJS-$(CONFIG_AV1_VDPAU_HWACCEL) += vdpau_av1.o
|
||||
OBJS-$(CONFIG_AV1_VIDEOTOOLBOX_HWACCEL) += videotoolbox_av1.o
|
||||
OBJS-$(CONFIG_AV1_VULKAN_HWACCEL) += vulkan_decode.o vulkan_av1.o
|
||||
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
|
||||
OBJS-$(CONFIG_H263_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
|
||||
|
@ -4141,9 +4141,9 @@ DISABLE_I8MM
|
||||
#endif
|
||||
|
||||
function vvc_put_qpel_hv4_8_end_neon
|
||||
vvc_load_qpel_filterh x5
|
||||
mov x7, #(VVC_MAX_PB_SIZE * 2)
|
||||
b 1f
|
||||
vvc_load_qpel_filterh x5
|
||||
mov x7, #(VVC_MAX_PB_SIZE * 2)
|
||||
b 1f
|
||||
endfunc
|
||||
|
||||
function hevc_put_hevc_qpel_hv4_8_end_neon
|
||||
|
@ -52,6 +52,50 @@ void ff_vvc_avg_12_neon(uint8_t *dst, ptrdiff_t dst_stride,
|
||||
const int16_t *src0, const int16_t *src1, int width,
|
||||
int height);
|
||||
|
||||
void ff_vvc_w_avg_8_neon(uint8_t *_dst, ptrdiff_t _dst_stride,
|
||||
const int16_t *src0, const int16_t *src1,
|
||||
int width, int height,
|
||||
uintptr_t w0_w1, uintptr_t offset_shift);
|
||||
void ff_vvc_w_avg_10_neon(uint8_t *_dst, ptrdiff_t _dst_stride,
|
||||
const int16_t *src0, const int16_t *src1,
|
||||
int width, int height,
|
||||
uintptr_t w0_w1, uintptr_t offset_shift);
|
||||
void ff_vvc_w_avg_12_neon(uint8_t *_dst, ptrdiff_t _dst_stride,
|
||||
const int16_t *src0, const int16_t *src1,
|
||||
int width, int height,
|
||||
uintptr_t w0_w1, uintptr_t offset_shift);
|
||||
/* When passing arguments to functions, Apple platforms diverge from the ARM64
 * standard ABI for functions that require passing arguments on the stack. To
 * simplify portability in the assembly function interface, use a different
 * function signature that doesn't require passing arguments on the stack.
 */
#define W_AVG_FUN(bit_depth) \
static void vvc_w_avg_ ## bit_depth(uint8_t *dst, ptrdiff_t dst_stride, \
    const int16_t *src0, const int16_t *src1, int width, int height, \
    int denom, int w0, int w1, int o0, int o1) \
{ \
    int shift = denom + FFMAX(3, 15 - bit_depth); \
    int offset = ((o0 + o1) * (1 << (bit_depth - 8)) + 1) * (1 << (shift - 1)); \
    uintptr_t w0_w1 = ((uintptr_t)w0 << 32) | (uint32_t)w1; \
    uintptr_t offset_shift = ((uintptr_t)offset << 32) | (uint32_t)shift; \
    ff_vvc_w_avg_ ## bit_depth ## _neon(dst, dst_stride, src0, src1, width, height, w0_w1, offset_shift); \
}

W_AVG_FUN(8)
W_AVG_FUN(10)
W_AVG_FUN(12)

#define DMVR_FUN(fn, bd) \
void ff_vvc_dmvr_ ## fn ## bd ## _neon(int16_t *dst, \
    const uint8_t *_src, ptrdiff_t _src_stride, int height, \
    intptr_t mx, intptr_t my, int width);

DMVR_FUN(, 8)
DMVR_FUN(, 12)
DMVR_FUN(hv_, 8)
DMVR_FUN(hv_, 10)
DMVR_FUN(hv_, 12)

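The packed w0_w1 and offset_shift arguments exist so that every parameter of ff_vvc_w_avg_*_neon fits in registers; as the comment above notes, Apple's arm64 ABI lays out stack arguments differently, so avoiding stack arguments keeps a single assembly entry point portable. A tiny illustration of the pack/unpack round trip, mirroring the "lsr x11, x6, #32" / "mov w12, w6" pair in the assembly (assumes a 64-bit uintptr_t, as on aarch64; not code from this diff):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int32_t w0 = -3, w1 = 5;

    /* pack: high word carries w0, low word carries w1 */
    uintptr_t w0_w1 = ((uintptr_t)(uint32_t)w0 << 32) | (uint32_t)w1;

    /* unpack, as the assembly does with a shift and a 32-bit move */
    int32_t hi = (int32_t)(w0_w1 >> 32);    /* lsr x11, x6, #32 */
    int32_t lo = (int32_t)(uint32_t)w0_w1;  /* mov w12, w6      */

    assert(hi == w0 && lo == w1);
    return 0;
}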
void ff_vvc_dsp_init_aarch64(VVCDSPContext *const c, const int bd)
|
||||
{
|
||||
int cpu_flags = av_get_cpu_flags();
|
||||
@ -123,6 +167,9 @@ void ff_vvc_dsp_init_aarch64(VVCDSPContext *const c, const int bd)
|
||||
c->inter.put_uni_w[0][6][0][0] = ff_vvc_put_pel_uni_w_pixels128_8_neon;
|
||||
|
||||
c->inter.avg = ff_vvc_avg_8_neon;
|
||||
c->inter.w_avg = vvc_w_avg_8;
|
||||
c->inter.dmvr[0][0] = ff_vvc_dmvr_8_neon;
|
||||
c->inter.dmvr[1][1] = ff_vvc_dmvr_hv_8_neon;
|
||||
|
||||
for (int i = 0; i < FF_ARRAY_ELEMS(c->sao.band_filter); i++)
|
||||
c->sao.band_filter[i] = ff_h26x_sao_band_filter_8x8_8_neon;
|
||||
@ -163,11 +210,16 @@ void ff_vvc_dsp_init_aarch64(VVCDSPContext *const c, const int bd)
|
||||
}
|
||||
} else if (bd == 10) {
|
||||
c->inter.avg = ff_vvc_avg_10_neon;
|
||||
c->inter.w_avg = vvc_w_avg_10;
|
||||
c->inter.dmvr[1][1] = ff_vvc_dmvr_hv_10_neon;
|
||||
|
||||
c->alf.filter[LUMA] = alf_filter_luma_10_neon;
|
||||
c->alf.filter[CHROMA] = alf_filter_chroma_10_neon;
|
||||
} else if (bd == 12) {
|
||||
c->inter.avg = ff_vvc_avg_12_neon;
|
||||
c->inter.w_avg = vvc_w_avg_12;
|
||||
c->inter.dmvr[0][0] = ff_vvc_dmvr_12_neon;
|
||||
c->inter.dmvr[1][1] = ff_vvc_dmvr_hv_12_neon;
|
||||
|
||||
c->alf.filter[LUMA] = alf_filter_luma_12_neon;
|
||||
c->alf.filter[CHROMA] = alf_filter_chroma_12_neon;
|
||||
|
@ -22,9 +22,9 @@
|
||||
|
||||
#define VVC_MAX_PB_SIZE 128
|
||||
|
||||
.macro vvc_avg, bit_depth
|
||||
.macro vvc_avg type, bit_depth
|
||||
|
||||
.macro vvc_avg_\bit_depth\()_2_4, tap
|
||||
.macro vvc_\type\()_\bit_depth\()_2_4 tap
|
||||
.if \tap == 2
|
||||
ldr s0, [src0]
|
||||
ldr s2, [src1]
|
||||
@ -32,9 +32,19 @@
|
||||
ldr d0, [src0]
|
||||
ldr d2, [src1]
|
||||
.endif
|
||||
|
||||
.ifc \type, avg
|
||||
saddl v4.4s, v0.4h, v2.4h
|
||||
add v4.4s, v4.4s, v16.4s
|
||||
sqshrn v4.4h, v4.4s, #(15 - \bit_depth)
|
||||
.else
|
||||
mov v4.16b, v16.16b
|
||||
smlal v4.4s, v0.4h, v19.4h
|
||||
smlal v4.4s, v2.4h, v20.4h
|
||||
sqshl v4.4s, v4.4s, v22.4s
|
||||
sqxtn v4.4h, v4.4s
|
||||
.endif
|
||||
|
||||
.if \bit_depth == 8
|
||||
sqxtun v4.8b, v4.8h
|
||||
.if \tap == 2
|
||||
@ -57,7 +67,7 @@
|
||||
add dst, dst, dst_stride
|
||||
.endm
|
||||
|
||||
function ff_vvc_avg_\bit_depth\()_neon, export=1
|
||||
function ff_vvc_\type\()_\bit_depth\()_neon, export=1
|
||||
dst .req x0
|
||||
dst_stride .req x1
|
||||
src0 .req x2
|
||||
@ -67,42 +77,64 @@ function ff_vvc_avg_\bit_depth\()_neon, export=1
|
||||
|
||||
mov x10, #(VVC_MAX_PB_SIZE * 2)
|
||||
cmp width, #8
|
||||
.if \bit_depth == 8
|
||||
movi v16.4s, #64
|
||||
.ifc \type, avg
|
||||
movi v16.4s, #(1 << (14 - \bit_depth))
|
||||
.else
|
||||
.if \bit_depth == 10
|
||||
mov w6, #1023
|
||||
movi v16.4s, #16
|
||||
.else
|
||||
mov w6, #4095
|
||||
movi v16.4s, #4
|
||||
.endif
|
||||
lsr x11, x6, #32 // weight0
|
||||
mov w12, w6 // weight1
|
||||
lsr x13, x7, #32 // offset
|
||||
mov w14, w7 // shift
|
||||
|
||||
dup v19.8h, w11
|
||||
neg w14, w14 // so we can use sqshl
|
||||
dup v20.8h, w12
|
||||
dup v16.4s, w13
|
||||
dup v22.4s, w14
|
||||
.endif // avg
|
||||
|
||||
.if \bit_depth >= 10
|
||||
// clip pixel
|
||||
mov w6, #((1 << \bit_depth) - 1)
|
||||
movi v18.8h, #0
|
||||
dup v17.8h, w6
|
||||
.endif
|
||||
|
||||
b.eq 8f
|
||||
b.hi 16f
|
||||
cmp width, #4
|
||||
b.eq 4f
|
||||
2: // width == 2
|
||||
subs height, height, #1
|
||||
vvc_avg_\bit_depth\()_2_4 2
|
||||
vvc_\type\()_\bit_depth\()_2_4 2
|
||||
b.ne 2b
|
||||
b 32f
|
||||
4: // width == 4
|
||||
subs height, height, #1
|
||||
vvc_avg_\bit_depth\()_2_4 4
|
||||
vvc_\type\()_\bit_depth\()_2_4 4
|
||||
b.ne 4b
|
||||
b 32f
|
||||
8: // width == 8
|
||||
ld1 {v0.8h}, [src0], x10
|
||||
ld1 {v2.8h}, [src1], x10
|
||||
.ifc \type, avg
|
||||
saddl v4.4s, v0.4h, v2.4h
|
||||
saddl2 v5.4s, v0.8h, v2.8h
|
||||
add v4.4s, v4.4s, v16.4s
|
||||
add v5.4s, v5.4s, v16.4s
|
||||
sqshrn v4.4h, v4.4s, #(15 - \bit_depth)
|
||||
sqshrn2 v4.8h, v5.4s, #(15 - \bit_depth)
|
||||
.else
|
||||
mov v4.16b, v16.16b
|
||||
mov v5.16b, v16.16b
|
||||
smlal v4.4s, v0.4h, v19.4h
|
||||
smlal v4.4s, v2.4h, v20.4h
|
||||
smlal2 v5.4s, v0.8h, v19.8h
|
||||
smlal2 v5.4s, v2.8h, v20.8h
|
||||
sqshl v4.4s, v4.4s, v22.4s
|
||||
sqshl v5.4s, v5.4s, v22.4s
|
||||
sqxtn v4.4h, v4.4s
|
||||
sqxtn2 v4.8h, v5.4s
|
||||
.endif
|
||||
subs height, height, #1
|
||||
.if \bit_depth == 8
|
||||
sqxtun v4.8b, v4.8h
|
||||
@ -122,6 +154,7 @@ function ff_vvc_avg_\bit_depth\()_neon, export=1
|
||||
17:
|
||||
ldp q0, q1, [x7], #32
|
||||
ldp q2, q3, [x8], #32
|
||||
.ifc \type, avg
|
||||
saddl v4.4s, v0.4h, v2.4h
|
||||
saddl2 v5.4s, v0.8h, v2.8h
|
||||
saddl v6.4s, v1.4h, v3.4h
|
||||
@ -134,6 +167,28 @@ function ff_vvc_avg_\bit_depth\()_neon, export=1
|
||||
sqshrn2 v4.8h, v5.4s, #(15 - \bit_depth)
|
||||
sqshrn v6.4h, v6.4s, #(15 - \bit_depth)
|
||||
sqshrn2 v6.8h, v7.4s, #(15 - \bit_depth)
|
||||
.else // avg
|
||||
mov v4.16b, v16.16b
|
||||
mov v5.16b, v16.16b
|
||||
mov v6.16b, v16.16b
|
||||
mov v7.16b, v16.16b
|
||||
smlal v4.4s, v0.4h, v19.4h
|
||||
smlal v4.4s, v2.4h, v20.4h
|
||||
smlal2 v5.4s, v0.8h, v19.8h
|
||||
smlal2 v5.4s, v2.8h, v20.8h
|
||||
smlal v6.4s, v1.4h, v19.4h
|
||||
smlal v6.4s, v3.4h, v20.4h
|
||||
smlal2 v7.4s, v1.8h, v19.8h
|
||||
smlal2 v7.4s, v3.8h, v20.8h
|
||||
sqshl v4.4s, v4.4s, v22.4s
|
||||
sqshl v5.4s, v5.4s, v22.4s
|
||||
sqshl v6.4s, v6.4s, v22.4s
|
||||
sqshl v7.4s, v7.4s, v22.4s
|
||||
sqxtn v4.4h, v4.4s
|
||||
sqxtn v6.4h, v6.4s
|
||||
sqxtn2 v4.8h, v5.4s
|
||||
sqxtn2 v6.8h, v7.4s
|
||||
.endif // w_avg
|
||||
subs w6, w6, #16
|
||||
.if \bit_depth == 8
|
||||
sqxtun v4.8b, v4.8h
|
||||
@ -155,9 +210,411 @@ function ff_vvc_avg_\bit_depth\()_neon, export=1
|
||||
b.ne 16b
|
||||
32:
|
||||
ret
|
||||
|
||||
.unreq dst
|
||||
.unreq dst_stride
|
||||
.unreq src0
|
||||
.unreq src1
|
||||
.unreq width
|
||||
.unreq height
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
vvc_avg 8
|
||||
vvc_avg 10
|
||||
vvc_avg 12
|
||||
vvc_avg avg, 8
|
||||
vvc_avg avg, 10
|
||||
vvc_avg avg, 12
|
||||
vvc_avg w_avg, 8
|
||||
vvc_avg w_avg, 10
|
||||
vvc_avg w_avg, 12
|
||||
|
||||
/* x0: int16_t *dst
|
||||
* x1: const uint8_t *_src
|
||||
* x2: ptrdiff_t _src_stride
|
||||
* w3: int height
|
||||
* x4: intptr_t mx
|
||||
* x5: intptr_t my
|
||||
* w6: int width
|
||||
*/
|
||||
function ff_vvc_dmvr_8_neon, export=1
|
||||
dst .req x0
|
||||
src .req x1
|
||||
src_stride .req x2
|
||||
height .req w3
|
||||
mx .req x4
|
||||
my .req x5
|
||||
width .req w6
|
||||
|
||||
sxtw x6, w6
|
||||
mov x7, #(VVC_MAX_PB_SIZE * 2 + 8)
|
||||
cmp width, #16
|
||||
sub src_stride, src_stride, x6
|
||||
cset w15, gt // width > 16
|
||||
movi v16.8h, #2 // DMVR_SHIFT
|
||||
sub x7, x7, x6, lsl #1
|
||||
1:
|
||||
cbz w15, 2f
|
||||
ldr q0, [src], #16
|
||||
uxtl v1.8h, v0.8b
|
||||
uxtl2 v2.8h, v0.16b
|
||||
ushl v1.8h, v1.8h, v16.8h
|
||||
ushl v2.8h, v2.8h, v16.8h
|
||||
stp q1, q2, [dst], #32
|
||||
b 3f
|
||||
2:
|
||||
ldr d0, [src], #8
|
||||
uxtl v1.8h, v0.8b
|
||||
ushl v1.8h, v1.8h, v16.8h
|
||||
str q1, [dst], #16
|
||||
3:
|
||||
subs height, height, #1
|
||||
ldr s3, [src], #4
|
||||
uxtl v4.8h, v3.8b
|
||||
ushl v4.4h, v4.4h, v16.4h
|
||||
st1 {v4.4h}, [dst], x7
|
||||
|
||||
add src, src, src_stride
|
||||
b.ne 1b
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vvc_dmvr_12_neon, export=1
|
||||
sxtw x6, w6
|
||||
mov x7, #(VVC_MAX_PB_SIZE * 2 + 8)
|
||||
cmp width, #16
|
||||
sub src_stride, src_stride, x6, lsl #1
|
||||
cset w15, gt // width > 16
|
||||
movi v16.8h, #2 // offset4
|
||||
sub x7, x7, x6, lsl #1
|
||||
1:
|
||||
cbz w15, 2f
|
||||
ldp q0, q1, [src], #32
|
||||
uaddl v2.4s, v0.4h, v16.4h
|
||||
uaddl2 v3.4s, v0.8h, v16.8h
|
||||
uaddl v4.4s, v1.4h, v16.4h
|
||||
uaddl2 v5.4s, v1.8h, v16.8h
|
||||
ushr v2.4s, v2.4s, #2
|
||||
ushr v3.4s, v3.4s, #2
|
||||
ushr v4.4s, v4.4s, #2
|
||||
ushr v5.4s, v5.4s, #2
|
||||
uqxtn v2.4h, v2.4s
|
||||
uqxtn2 v2.8h, v3.4s
|
||||
uqxtn v4.4h, v4.4s
|
||||
uqxtn2 v4.8h, v5.4s
|
||||
|
||||
stp q2, q4, [dst], #32
|
||||
b 3f
|
||||
2:
|
||||
ldr q0, [src], #16
|
||||
uaddl v2.4s, v0.4h, v16.4h
|
||||
uaddl2 v3.4s, v0.8h, v16.8h
|
||||
ushr v2.4s, v2.4s, #2
|
||||
ushr v3.4s, v3.4s, #2
|
||||
uqxtn v2.4h, v2.4s
|
||||
uqxtn2 v2.8h, v3.4s
|
||||
str q2, [dst], #16
|
||||
3:
|
||||
subs height, height, #1
|
||||
ldr d0, [src], #8
|
||||
uaddl v3.4s, v0.4h, v16.4h
|
||||
ushr v3.4s, v3.4s, #2
|
||||
uqxtn v3.4h, v3.4s
|
||||
st1 {v3.4h}, [dst], x7
|
||||
|
||||
add src, src, src_stride
|
||||
b.ne 1b
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vvc_dmvr_hv_8_neon, export=1
|
||||
tmp0 .req x7
|
||||
tmp1 .req x8
|
||||
|
||||
sub sp, sp, #(VVC_MAX_PB_SIZE * 4)
|
||||
|
||||
movrel x9, X(ff_vvc_inter_luma_dmvr_filters)
|
||||
add x12, x9, mx, lsl #1
|
||||
ldrb w10, [x12]
|
||||
ldrb w11, [x12, #1]
|
||||
mov tmp0, sp
|
||||
add tmp1, tmp0, #(VVC_MAX_PB_SIZE * 2)
|
||||
// We know the values are positive
|
||||
dup v0.8h, w10 // filter_x[0]
|
||||
dup v1.8h, w11 // filter_x[1]
|
||||
|
||||
add x12, x9, my, lsl #1
|
||||
ldrb w10, [x12]
|
||||
ldrb w11, [x12, #1]
|
||||
sxtw x6, w6
|
||||
movi v30.8h, #(1 << (8 - 7)) // offset1
|
||||
movi v31.8h, #8 // offset2
|
||||
dup v2.8h, w10 // filter_y[0]
|
||||
dup v3.8h, w11 // filter_y[1]
|
||||
|
||||
// The only valid widths are 8 + 4 and 16 + 4
|
||||
cmp width, #16
|
||||
mov w10, #0 // start filter_y or not
|
||||
add height, height, #1
|
||||
sub dst, dst, #(VVC_MAX_PB_SIZE * 2)
|
||||
sub src_stride, src_stride, x6
|
||||
cset w15, gt // width > 16
|
||||
1:
|
||||
mov x12, tmp0
|
||||
mov x13, tmp1
|
||||
mov x14, dst
|
||||
cbz w15, 2f
|
||||
|
||||
// width > 16
|
||||
ldur q5, [src, #1]
|
||||
ldr q4, [src], #16
|
||||
uxtl v7.8h, v5.8b
|
||||
uxtl2 v17.8h, v5.16b
|
||||
uxtl v6.8h, v4.8b
|
||||
uxtl2 v16.8h, v4.16b
|
||||
mul v6.8h, v6.8h, v0.8h
|
||||
mul v16.8h, v16.8h, v0.8h
|
||||
mla v6.8h, v7.8h, v1.8h
|
||||
mla v16.8h, v17.8h, v1.8h
|
||||
add v6.8h, v6.8h, v30.8h
|
||||
add v16.8h, v16.8h, v30.8h
|
||||
ushr v6.8h, v6.8h, #(8 - 6)
|
||||
ushr v7.8h, v16.8h, #(8 - 6)
|
||||
stp q6, q7, [x13], #32
|
||||
|
||||
cbz w10, 3f
|
||||
|
||||
ldp q16, q17, [x12], #32
|
||||
mul v16.8h, v16.8h, v2.8h
|
||||
mul v17.8h, v17.8h, v2.8h
|
||||
mla v16.8h, v6.8h, v3.8h
|
||||
mla v17.8h, v7.8h, v3.8h
|
||||
add v16.8h, v16.8h, v31.8h
|
||||
add v17.8h, v17.8h, v31.8h
|
||||
ushr v16.8h, v16.8h, #4
|
||||
ushr v17.8h, v17.8h, #4
|
||||
stp q16, q17, [x14], #32
|
||||
b 3f
|
||||
2:
|
||||
// width > 8
|
||||
ldur d5, [src, #1]
|
||||
ldr d4, [src], #8
|
||||
uxtl v7.8h, v5.8b
|
||||
uxtl v6.8h, v4.8b
|
||||
mul v6.8h, v6.8h, v0.8h
|
||||
mla v6.8h, v7.8h, v1.8h
|
||||
add v6.8h, v6.8h, v30.8h
|
||||
ushr v6.8h, v6.8h, #(8 - 6)
|
||||
str q6, [x13], #16
|
||||
|
||||
cbz w10, 3f
|
||||
|
||||
ldr q16, [x12], #16
|
||||
mul v16.8h, v16.8h, v2.8h
|
||||
mla v16.8h, v6.8h, v3.8h
|
||||
add v16.8h, v16.8h, v31.8h
|
||||
ushr v16.8h, v16.8h, #4
|
||||
str q16, [x14], #16
|
||||
3:
|
||||
ldr s5, [src, #1]
|
||||
ldr s4, [src], #4
|
||||
uxtl v7.8h, v5.8b
|
||||
uxtl v6.8h, v4.8b
|
||||
mul v6.4h, v6.4h, v0.4h
|
||||
mla v6.4h, v7.4h, v1.4h
|
||||
add v6.4h, v6.4h, v30.4h
|
||||
ushr v6.4h, v6.4h, #(8 - 6)
|
||||
str d6, [x13], #8
|
||||
|
||||
cbz w10, 4f
|
||||
|
||||
ldr d16, [x12], #8
|
||||
mul v16.4h, v16.4h, v2.4h
|
||||
mla v16.4h, v6.4h, v3.4h
|
||||
add v16.4h, v16.4h, v31.4h
|
||||
ushr v16.4h, v16.4h, #4
|
||||
str d16, [x14], #8
|
||||
4:
|
||||
subs height, height, #1
|
||||
mov w10, #1
|
||||
add src, src, src_stride
|
||||
add dst, dst, #(VVC_MAX_PB_SIZE * 2)
|
||||
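// xor-swap tmp0/tmp1: the row of horizontal-filter output just written
// becomes the "previous" row for the next iteration's vertical pass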
eor tmp0, tmp0, tmp1
|
||||
eor tmp1, tmp0, tmp1
|
||||
eor tmp0, tmp0, tmp1
|
||||
b.ne 1b
|
||||
|
||||
add sp, sp, #(VVC_MAX_PB_SIZE * 4)
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_vvc_dmvr_hv_12_neon, export=1
|
||||
movi v29.4s, #(12 - 6)
|
||||
movi v30.4s, #(1 << (12 - 7)) // offset1
|
||||
b 0f
|
||||
endfunc
|
||||
|
||||
function ff_vvc_dmvr_hv_10_neon, export=1
|
||||
movi v29.4s, #(10 - 6)
|
||||
movi v30.4s, #(1 << (10 - 7)) // offset1
|
||||
0:
|
||||
movi v31.4s, #8 // offset2
|
||||
neg v29.4s, v29.4s
|
||||
|
||||
sub sp, sp, #(VVC_MAX_PB_SIZE * 4)
|
||||
|
||||
movrel x9, X(ff_vvc_inter_luma_dmvr_filters)
|
||||
add x12, x9, mx, lsl #1
|
||||
ldrb w10, [x12]
|
||||
ldrb w11, [x12, #1]
|
||||
mov tmp0, sp
|
||||
add tmp1, tmp0, #(VVC_MAX_PB_SIZE * 2)
|
||||
// We know the values are positive
|
||||
dup v0.8h, w10 // filter_x[0]
|
||||
dup v1.8h, w11 // filter_x[1]
|
||||
|
||||
add x12, x9, my, lsl #1
|
||||
ldrb w10, [x12]
|
||||
ldrb w11, [x12, #1]
|
||||
sxtw x6, w6
|
||||
dup v2.8h, w10 // filter_y[0]
|
||||
dup v3.8h, w11 // filter_y[1]
|
||||
|
||||
// The only valid widths are 8 + 4 and 16 + 4
|
||||
cmp width, #16
|
||||
mov w10, #0 // start filter_y or not
|
||||
add height, height, #1
|
||||
sub dst, dst, #(VVC_MAX_PB_SIZE * 2)
|
||||
sub src_stride, src_stride, x6, lsl #1
|
||||
cset w15, gt // width > 16
|
||||
1:
|
||||
mov x12, tmp0
|
||||
mov x13, tmp1
|
||||
mov x14, dst
|
||||
cbz w15, 2f
|
||||
|
||||
// width > 16
|
||||
add x16, src, #2
|
||||
ldp q6, q16, [src], #32
|
||||
ldp q7, q17, [x16]
|
||||
umull v4.4s, v6.4h, v0.4h
|
||||
umull2 v5.4s, v6.8h, v0.8h
|
||||
umull v18.4s, v16.4h, v0.4h
|
||||
umull2 v19.4s, v16.8h, v0.8h
|
||||
umlal v4.4s, v7.4h, v1.4h
|
||||
umlal2 v5.4s, v7.8h, v1.8h
|
||||
umlal v18.4s, v17.4h, v1.4h
|
||||
umlal2 v19.4s, v17.8h, v1.8h
|
||||
|
||||
add v4.4s, v4.4s, v30.4s
|
||||
add v5.4s, v5.4s, v30.4s
|
||||
add v18.4s, v18.4s, v30.4s
|
||||
add v19.4s, v19.4s, v30.4s
|
||||
ushl v4.4s, v4.4s, v29.4s
|
||||
ushl v5.4s, v5.4s, v29.4s
|
||||
ushl v18.4s, v18.4s, v29.4s
|
||||
ushl v19.4s, v19.4s, v29.4s
|
||||
uqxtn v6.4h, v4.4s
|
||||
uqxtn2 v6.8h, v5.4s
|
||||
uqxtn v7.4h, v18.4s
|
||||
uqxtn2 v7.8h, v19.4s
|
||||
stp q6, q7, [x13], #32
|
||||
|
||||
cbz w10, 3f
|
||||
|
||||
ldp q4, q5, [x12], #32
|
||||
umull v17.4s, v4.4h, v2.4h
|
||||
umull2 v18.4s, v4.8h, v2.8h
|
||||
umull v19.4s, v5.4h, v2.4h
|
||||
umull2 v20.4s, v5.8h, v2.8h
|
||||
umlal v17.4s, v6.4h, v3.4h
|
||||
umlal2 v18.4s, v6.8h, v3.8h
|
||||
umlal v19.4s, v7.4h, v3.4h
|
||||
umlal2 v20.4s, v7.8h, v3.8h
|
||||
add v17.4s, v17.4s, v31.4s
|
||||
add v18.4s, v18.4s, v31.4s
|
||||
add v19.4s, v19.4s, v31.4s
|
||||
add v20.4s, v20.4s, v31.4s
|
||||
ushr v17.4s, v17.4s, #4
|
||||
ushr v18.4s, v18.4s, #4
|
||||
ushr v19.4s, v19.4s, #4
|
||||
ushr v20.4s, v20.4s, #4
|
||||
uqxtn v6.4h, v17.4s
|
||||
uqxtn2 v6.8h, v18.4s
|
||||
uqxtn v7.4h, v19.4s
|
||||
uqxtn2 v7.8h, v20.4s
|
||||
stp q6, q7, [x14], #32
|
||||
b 3f
|
||||
2:
|
||||
// width > 8
|
||||
ldur q7, [src, #2]
|
||||
ldr q6, [src], #16
|
||||
umull v4.4s, v6.4h, v0.4h
|
||||
umull2 v5.4s, v6.8h, v0.8h
|
||||
umlal v4.4s, v7.4h, v1.4h
|
||||
umlal2 v5.4s, v7.8h, v1.8h
|
||||
|
||||
add v4.4s, v4.4s, v30.4s
|
||||
add v5.4s, v5.4s, v30.4s
|
||||
ushl v4.4s, v4.4s, v29.4s
|
||||
ushl v5.4s, v5.4s, v29.4s
|
||||
uqxtn v6.4h, v4.4s
|
||||
uqxtn2 v6.8h, v5.4s
|
||||
str q6, [x13], #16
|
||||
|
||||
cbz w10, 3f
|
||||
|
||||
ldr q16, [x12], #16
|
||||
umull v17.4s, v16.4h, v2.4h
|
||||
umull2 v18.4s, v16.8h, v2.8h
|
||||
umlal v17.4s, v6.4h, v3.4h
|
||||
umlal2 v18.4s, v6.8h, v3.8h
|
||||
add v17.4s, v17.4s, v31.4s
|
||||
add v18.4s, v18.4s, v31.4s
|
||||
ushr v17.4s, v17.4s, #4
|
||||
ushr v18.4s, v18.4s, #4
|
||||
uqxtn v16.4h, v17.4s
|
||||
uqxtn2 v16.8h, v18.4s
|
||||
str q16, [x14], #16
|
||||
3:
|
||||
ldr d7, [src, #2]
|
||||
ldr d6, [src], #8
|
||||
umull v4.4s, v7.4h, v1.4h
|
||||
umlal v4.4s, v6.4h, v0.4h
|
||||
add v4.4s, v4.4s, v30.4s
|
||||
ushl v4.4s, v4.4s, v29.4s
|
||||
uqxtn v6.4h, v4.4s
|
||||
str d6, [x13], #8
|
||||
|
||||
cbz w10, 4f
|
||||
|
||||
ldr d16, [x12], #8
|
||||
umull v17.4s, v16.4h, v2.4h
|
||||
umlal v17.4s, v6.4h, v3.4h
|
||||
add v17.4s, v17.4s, v31.4s
|
||||
ushr v17.4s, v17.4s, #4
|
||||
uqxtn v16.4h, v17.4s
|
||||
str d16, [x14], #8
|
||||
4:
|
||||
subs height, height, #1
|
||||
mov w10, #1
|
||||
add src, src, src_stride
|
||||
add dst, dst, #(VVC_MAX_PB_SIZE * 2)
|
||||
eor tmp0, tmp0, tmp1
|
||||
eor tmp1, tmp0, tmp1
|
||||
eor tmp0, tmp0, tmp1
|
||||
b.ne 1b
|
||||
|
||||
add sp, sp, #(VVC_MAX_PB_SIZE * 4)
|
||||
ret
|
||||
|
||||
.unreq dst
|
||||
.unreq src
|
||||
.unreq src_stride
|
||||
.unreq height
|
||||
.unreq mx
|
||||
.unreq my
|
||||
.unreq width
|
||||
.unreq tmp0
|
||||
.unreq tmp1
|
||||
endfunc
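For readability, a scalar C model of what this routine computes, assuming it is the separable two-tap horizontal-then-vertical pass that the filter_x/filter_y loads above suggest (names, offsets and shift amounts below are illustrative, not taken from the source; the exact rounding constants live in v29-v31):

#include <stdint.h>

/* Hypothetical per-row helper: horizontal 2-tap filter into tmp1, then a
 * vertical 2-tap filter combining the previous row (tmp0) with it. */
static void hv_2tap_row(uint16_t *dst, const uint16_t *src, int width,
                        const int fx[2], const int fy[2],
                        const uint16_t *tmp0, uint16_t *tmp1,
                        int off_h, int shift_h, int off_v, int first_row)
{
    for (int x = 0; x < width; x++)                      /* horizontal pass */
        tmp1[x] = (fx[0] * src[x] + fx[1] * src[x + 1] + off_h) >> shift_h;
    if (!first_row)                                      /* vertical pass */
        for (int x = 0; x < width; x++)
            dst[x] = (fy[0] * tmp0[x] + fy[1] * tmp1[x] + off_v) >> 4;
}

After each row the caller would swap tmp0 and tmp1, which is exactly what the eor/eor/eor sequence above does without needing a scratch register.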
|
||||
|
@ -766,11 +766,50 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
|
||||
switch (avctx->codec->id) {
|
||||
case AV_CODEC_ID_H264:
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_INSERT_AUD, !!ctx->aud);
|
||||
switch (frame->pict_type) {
|
||||
case AV_PICTURE_TYPE_I:
|
||||
if (ctx->forced_idr) {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_INSERT_SPS, 1);
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_INSERT_PPS, 1);
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_IDR);
|
||||
} else {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_I);
|
||||
}
|
||||
break;
|
||||
case AV_PICTURE_TYPE_P:
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_P);
|
||||
break;
|
||||
case AV_PICTURE_TYPE_B:
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_B);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_HEVC:
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_INSERT_AUD, !!ctx->aud);
|
||||
switch (frame->pict_type) {
|
||||
case AV_PICTURE_TYPE_I:
|
||||
if (ctx->forced_idr) {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_INSERT_HEADER, 1);
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_IDR);
|
||||
} else {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_I);
|
||||
}
|
||||
break;
|
||||
case AV_PICTURE_TYPE_P:
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_P);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case AV_CODEC_ID_AV1:
|
||||
if (frame->pict_type == AV_PICTURE_TYPE_I) {
|
||||
if (ctx->forced_idr) {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_AV1_FORCE_INSERT_SEQUENCE_HEADER, 1);
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_AV1_FORCE_FRAME_TYPE, AMF_VIDEO_ENCODER_AV1_FORCE_FRAME_TYPE_KEY);
|
||||
} else {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_AV1_FORCE_FRAME_TYPE, AMF_VIDEO_ENCODER_AV1_FORCE_FRAME_TYPE_INTRA_ONLY);
|
||||
}
|
||||
}
|
||||
break;
|
||||
//case AV_CODEC_ID_AV1 not supported
|
||||
default:
|
||||
break;
|
||||
}
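The switch above only acts on the frame->pict_type hint supplied by the caller. A minimal sketch of how an application might request an IDR (for example at a scene cut) with the AMF encoders, assuming the context was opened as h264_amf or hevc_amf and with error handling omitted:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Sketch: ask the AMF encoder to emit an IDR for this particular frame.
 * "forced_idr" is the private option declared in the option tables below,
 * and is read per frame by the code above. */
static void request_idr(AVCodecContext *enc, AVFrame *frame)
{
    av_opt_set_int(enc->priv_data, "forced_idr", 1, 0);
    frame->pict_type = AV_PICTURE_TYPE_I;
    avcodec_send_frame(enc, frame);
}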
|
||||
|
@ -114,6 +114,7 @@ typedef struct AmfContext {
|
||||
int max_b_frames;
|
||||
int qvbr_quality_level;
|
||||
int hw_high_motion_quality_boost;
|
||||
int forced_idr;
|
||||
|
||||
// HEVC - specific options
|
||||
|
||||
|
@ -116,6 +116,7 @@ static const AVOption options[] = {
|
||||
{ "none", "no adaptive quantization", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_AQ_MODE_NONE }, 0, 0, VE, .unit = "adaptive_quantisation_mode" },
|
||||
{ "caq", "context adaptive quantization", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_AQ_MODE_CAQ }, 0, 0, VE, .unit = "adaptive_quantisation_mode" },
|
||||
|
||||
{ "forced_idr", "Force I frames to be IDR frames", OFFSET(forced_idr), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
|
||||
|
||||
{ "align", "alignment mode", OFFSET(align), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS }, AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_ONLY, AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS, VE, .unit = "align" },
|
||||
{ "64x16", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_ONLY }, 0, 0, VE, .unit = "align" },
|
||||
|
@ -133,6 +133,7 @@ static const AVOption options[] = {
|
||||
{ "me_half_pel", "Enable ME Half Pixel", OFFSET(me_half_pel), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VE },
|
||||
{ "me_quarter_pel", "Enable ME Quarter Pixel", OFFSET(me_quarter_pel),AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VE },
|
||||
|
||||
{ "forced_idr", "Force I frames to be IDR frames", OFFSET(forced_idr) , AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
|
||||
{ "aud", "Inserts AU Delimiter NAL unit", OFFSET(aud) , AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VE },
|
||||
|
||||
|
||||
|
@ -100,6 +100,7 @@ static const AVOption options[] = {
|
||||
{ "me_half_pel", "Enable ME Half Pixel", OFFSET(me_half_pel), AV_OPT_TYPE_BOOL,{ .i64 = -1 }, -1, 1, VE },
|
||||
{ "me_quarter_pel", "Enable ME Quarter Pixel ", OFFSET(me_quarter_pel),AV_OPT_TYPE_BOOL,{ .i64 = -1 }, -1, 1, VE },
|
||||
|
||||
{ "forced_idr", "Force I frames to be IDR frames", OFFSET(forced_idr) ,AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE },
|
||||
{ "aud", "Inserts AU Delimiter NAL unit", OFFSET(aud) ,AV_OPT_TYPE_BOOL,{ .i64 = -1 }, -1, 1, VE },
|
||||
|
||||
|
||||
@ -269,7 +270,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||
if (avctx->slices > 1) {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
|
||||
}
|
||||
AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, deblocking_filter);
|
||||
AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, !deblocking_filter);
|
||||
|
||||
if (ctx->header_insertion_mode != -1) {
|
||||
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE, ctx->header_insertion_mode);
|
||||
@ -511,6 +512,7 @@ static const FFCodecDefault defaults[] = {
|
||||
{ "slices", "1" },
|
||||
{ "qmin", "-1" },
|
||||
{ "qmax", "-1" },
|
||||
{ "flags", "+loop"},
|
||||
{ NULL },
|
||||
};
|
||||
static const AVClass hevc_amf_class = {
|
||||
@ -536,7 +538,7 @@ const FFCodec ff_hevc_amf_encoder = {
|
||||
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
|
||||
FF_CODEC_CAP_INIT_CLEANUP,
|
||||
.p.pix_fmts = ff_amf_pix_fmts,
|
||||
.color_ranges = AVCOL_RANGE_MPEG, /* FIXME: implement tagging */
|
||||
.color_ranges = AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG,
|
||||
.p.wrapper_name = "amf",
|
||||
.hw_configs = ff_amfenc_hw_configs,
|
||||
};
|
||||
|
@ -541,6 +541,7 @@ static int get_pixel_format(AVCodecContext *avctx)
|
||||
CONFIG_AV1_NVDEC_HWACCEL + \
|
||||
CONFIG_AV1_VAAPI_HWACCEL + \
|
||||
CONFIG_AV1_VDPAU_HWACCEL + \
|
||||
CONFIG_AV1_VIDEOTOOLBOX_HWACCEL + \
|
||||
CONFIG_AV1_VULKAN_HWACCEL)
|
||||
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
|
||||
|
||||
@ -568,6 +569,9 @@ static int get_pixel_format(AVCodecContext *avctx)
|
||||
#if CONFIG_AV1_VDPAU_HWACCEL
|
||||
*fmtp++ = AV_PIX_FMT_VDPAU;
|
||||
#endif
|
||||
#if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
|
||||
*fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
|
||||
#endif
|
||||
#if CONFIG_AV1_VULKAN_HWACCEL
|
||||
*fmtp++ = AV_PIX_FMT_VULKAN;
|
||||
#endif
|
||||
@ -592,6 +596,9 @@ static int get_pixel_format(AVCodecContext *avctx)
|
||||
#if CONFIG_AV1_VDPAU_HWACCEL
|
||||
*fmtp++ = AV_PIX_FMT_VDPAU;
|
||||
#endif
|
||||
#if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
|
||||
*fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
|
||||
#endif
|
||||
#if CONFIG_AV1_VULKAN_HWACCEL
|
||||
*fmtp++ = AV_PIX_FMT_VULKAN;
|
||||
#endif
|
||||
@ -1439,6 +1446,10 @@ static int av1_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
|
||||
|
||||
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
|
||||
int show_frame = s->raw_frame_header->show_frame;
|
||||
// Set nb_unit to point at the next OBU, to indicate which
|
||||
// OBUs have been processed for this current frame. (If this
|
||||
// frame gets output, we set nb_unit to this value later too.)
|
||||
s->nb_unit = i + 1;
|
||||
if (avctx->hwaccel && s->cur_frame.f) {
|
||||
ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
|
||||
if (ret < 0) {
|
||||
@ -1449,6 +1460,8 @@ static int av1_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
|
||||
|
||||
update_reference_list(avctx);
|
||||
|
||||
// Set start_unit to indicate the first OBU of the next frame.
|
||||
s->start_unit = s->nb_unit;
|
||||
raw_tile_group = NULL;
|
||||
s->raw_frame_header = NULL;
|
||||
|
||||
@ -1478,7 +1491,7 @@ end:
|
||||
s->raw_frame_header = NULL;
|
||||
av_packet_unref(s->pkt);
|
||||
ff_cbs_fragment_reset(&s->current_obu);
|
||||
s->nb_unit = 0;
|
||||
s->nb_unit = s->start_unit = 0;
|
||||
}
|
||||
if (!ret && !frame->buf[0])
|
||||
ret = AVERROR(EAGAIN);
|
||||
@ -1505,7 +1518,7 @@ static int av1_receive_frame(AVCodecContext *avctx, AVFrame *frame)
|
||||
return ret;
|
||||
}
|
||||
|
||||
s->nb_unit = 0;
|
||||
s->nb_unit = s->start_unit = 0;
|
||||
av_log(avctx, AV_LOG_DEBUG, "Total OBUs on this packet: %d.\n",
|
||||
s->current_obu.nb_units);
|
||||
}
|
||||
@ -1526,7 +1539,7 @@ static void av1_decode_flush(AVCodecContext *avctx)
|
||||
|
||||
av1_frame_unref(&s->cur_frame);
|
||||
s->operating_point_idc = 0;
|
||||
s->nb_unit = 0;
|
||||
s->nb_unit = s->start_unit = 0;
|
||||
s->raw_frame_header = NULL;
|
||||
s->raw_seq = NULL;
|
||||
s->cll = NULL;
|
||||
@ -1594,6 +1607,9 @@ const FFCodec ff_av1_decoder = {
|
||||
#if CONFIG_AV1_VDPAU_HWACCEL
|
||||
HWACCEL_VDPAU(av1),
|
||||
#endif
|
||||
#if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
|
||||
HWACCEL_VIDEOTOOLBOX(av1),
|
||||
#endif
|
||||
#if CONFIG_AV1_VULKAN_HWACCEL
|
||||
HWACCEL_VULKAN(av1),
|
||||
#endif
|
||||
|
@ -114,7 +114,8 @@ typedef struct AV1DecContext {
|
||||
AV1Frame ref[AV1_NUM_REF_FRAMES];
|
||||
AV1Frame cur_frame;
|
||||
|
||||
int nb_unit;
|
||||
int nb_unit; ///< The index of the next OBU to be processed.
|
||||
int start_unit; ///< The index of the first OBU of the current frame.
|
||||
|
||||
// AVOptions
|
||||
int operating_point;
|
||||
|
@ -144,6 +144,8 @@ av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
|
||||
sc->slice_height = sye - sys;
|
||||
sc->slice_x = sxs;
|
||||
sc->slice_y = sys;
|
||||
sc->sx = sx;
|
||||
sc->sy = sy;
|
||||
|
||||
sc->sample_buffer = av_malloc_array((f->width + 6), 3 * MAX_PLANES *
|
||||
sizeof(*sc->sample_buffer));
|
||||
|
@ -75,6 +75,7 @@ typedef struct FFV1SliceContext {
|
||||
int slice_height;
|
||||
int slice_x;
|
||||
int slice_y;
|
||||
int sx, sy;
|
||||
|
||||
int run_index;
|
||||
int slice_coding_mode;
|
||||
|
@ -354,6 +354,9 @@ static int decode_slice(AVCodecContext *c, void *arg)
|
||||
}
|
||||
}
|
||||
|
||||
if (sc->slice_damaged && (f->avctx->err_recognition & AV_EF_EXPLODE))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if ((c->active_thread_type & FF_THREAD_FRAME) && !f->frame_damaged)
|
||||
ff_progress_frame_report(&f->picture, si);
|
||||
|
||||
|
@ -528,6 +528,11 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
avctx->slices > 1)
|
||||
s->version = FFMAX(s->version, 2);
|
||||
|
||||
if ((avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) && s->ac == AC_GOLOMB_RICE) {
|
||||
av_log(avctx, AV_LOG_ERROR, "2 Pass mode is not possible with golomb coding\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
// Unspecified level & slices, we choose version 1.2+ to ensure multithreaded decodability
|
||||
if (avctx->slices == 0 && avctx->level < 0 && avctx->width * avctx->height > 720*576)
|
||||
s->version = FFMAX(s->version, 2);
|
||||
@ -918,10 +923,10 @@ static void encode_slice_header(FFV1Context *f, FFV1SliceContext *sc)
|
||||
int j;
|
||||
memset(state, 128, sizeof(state));
|
||||
|
||||
put_symbol(c, state, (sc->slice_x +1)*f->num_h_slices / f->width , 0);
|
||||
put_symbol(c, state, (sc->slice_y +1)*f->num_v_slices / f->height , 0);
|
||||
put_symbol(c, state, (sc->slice_width +1)*f->num_h_slices / f->width -1, 0);
|
||||
put_symbol(c, state, (sc->slice_height+1)*f->num_v_slices / f->height-1, 0);
|
||||
put_symbol(c, state, sc->sx, 0);
|
||||
put_symbol(c, state, sc->sy, 0);
|
||||
put_symbol(c, state, 0, 0);
|
||||
put_symbol(c, state, 0, 0);
|
||||
for (j=0; j<f->plane_count; j++) {
|
||||
put_symbol(c, state, sc->plane[j].quant_table_index, 0);
|
||||
av_assert0(sc->plane[j].quant_table_index == f->context_model);
|
||||
|
@ -102,6 +102,8 @@ av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
|
||||
ff_h264qpel_init_arm(c, bit_depth);
|
||||
#elif ARCH_PPC
|
||||
ff_h264qpel_init_ppc(c, bit_depth);
|
||||
#elif ARCH_RISCV
|
||||
ff_h264qpel_init_riscv(c, bit_depth);
|
||||
#elif ARCH_X86
|
||||
ff_h264qpel_init_x86(c, bit_depth);
|
||||
#elif ARCH_MIPS
|
||||
|
@ -34,6 +34,7 @@ void ff_h264qpel_init(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_aarch64(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_arm(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_riscv(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_x86(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_mips(H264QpelContext *c, int bit_depth);
|
||||
void ff_h264qpel_init_loongarch(H264QpelContext *c, int bit_depth);
|
||||
|
@ -26,6 +26,7 @@ extern const struct FFHWAccel ff_av1_dxva2_hwaccel;
|
||||
extern const struct FFHWAccel ff_av1_nvdec_hwaccel;
|
||||
extern const struct FFHWAccel ff_av1_vaapi_hwaccel;
|
||||
extern const struct FFHWAccel ff_av1_vdpau_hwaccel;
|
||||
extern const struct FFHWAccel ff_av1_videotoolbox_hwaccel;
|
||||
extern const struct FFHWAccel ff_av1_vulkan_hwaccel;
|
||||
extern const struct FFHWAccel ff_h263_vaapi_hwaccel;
|
||||
extern const struct FFHWAccel ff_h263_videotoolbox_hwaccel;
|
||||
|
@ -33,6 +33,8 @@ RVV-OBJS-$(CONFIG_H264CHROMA) += riscv/h264_mc_chroma.o
|
||||
OBJS-$(CONFIG_H264DSP) += riscv/h264dsp_init.o
|
||||
RVV-OBJS-$(CONFIG_H264DSP) += riscv/h264addpx_rvv.o riscv/h264dsp_rvv.o \
|
||||
riscv/h264idct_rvv.o
|
||||
OBJS-$(CONFIG_H264QPEL) += riscv/h264qpel_init.o
|
||||
RVV-OBJS-$(CONFIG_H264QPEL) += riscv/h264qpel_rvv.o
|
||||
OBJS-$(CONFIG_HUFFYUV_DECODER) += riscv/huffyuvdsp_init.o
|
||||
RVV-OBJS-$(CONFIG_HUFFYUV_DECODER) += riscv/huffyuvdsp_rvv.o
|
||||
OBJS-$(CONFIG_IDCTDSP) += riscv/idctdsp_init.o
|
||||
|
@ -96,14 +96,31 @@ av_cold void ff_h264dsp_init_riscv(H264DSPContext *dsp, const int bit_depth,
|
||||
if (flags & AV_CPU_FLAG_RVV_I32) {
|
||||
const bool zvl128b = ff_rv_vlen_least(128);
|
||||
|
||||
if (bit_depth == 8 && zvl128b) {
|
||||
for (int i = 0; i < 4; i++) {
|
||||
dsp->weight_h264_pixels_tab[i] =
|
||||
ff_h264_weight_funcs_8_rvv[i].weight;
|
||||
dsp->biweight_h264_pixels_tab[i] =
|
||||
ff_h264_weight_funcs_8_rvv[i].biweight;
|
||||
if (bit_depth == 8) {
|
||||
if (zvl128b) {
|
||||
if (flags & AV_CPU_FLAG_RVB)
|
||||
dsp->weight_h264_pixels_tab[0] =
|
||||
ff_h264_weight_funcs_8_rvv[0].weight;
|
||||
dsp->biweight_h264_pixels_tab[0] =
|
||||
ff_h264_weight_funcs_8_rvv[0].biweight;
|
||||
}
|
||||
if (flags & AV_CPU_FLAG_RVV_I64) {
|
||||
dsp->weight_h264_pixels_tab[1] =
|
||||
ff_h264_weight_funcs_8_rvv[1].weight;
|
||||
dsp->biweight_h264_pixels_tab[1] =
|
||||
ff_h264_weight_funcs_8_rvv[1].biweight;
|
||||
}
|
||||
dsp->weight_h264_pixels_tab[2] =
|
||||
ff_h264_weight_funcs_8_rvv[2].weight;
|
||||
dsp->biweight_h264_pixels_tab[2] =
|
||||
ff_h264_weight_funcs_8_rvv[2].biweight;
|
||||
dsp->weight_h264_pixels_tab[3] =
|
||||
ff_h264_weight_funcs_8_rvv[3].weight;
|
||||
dsp->biweight_h264_pixels_tab[3] =
|
||||
ff_h264_weight_funcs_8_rvv[3].biweight;
|
||||
}
|
||||
|
||||
if (bit_depth == 8 && zvl128b) {
|
||||
dsp->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_8_rvv;
|
||||
dsp->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_8_rvv;
|
||||
dsp->h264_h_loop_filter_luma_mbaff =
|
||||
|
@ -28,20 +28,30 @@
|
||||
|
||||
#include "libavutil/riscv/asm.S"
|
||||
|
||||
func ff_h264_weight_pixels_simple_8_rvv, zve32x
|
||||
.variant_cc ff_h264_weight_pixels_simple_8_rvv
|
||||
func ff_h264_weight_pixels_simple_8_rvv, zve32x, b
|
||||
csrwi vxrm, 0
|
||||
sll a5, a5, a3
|
||||
1:
|
||||
vsetvli zero, a6, e16, m2, ta, ma
|
||||
vle8.v v8, (a0)
|
||||
addi a2, a2, -1
|
||||
vsetvli zero, t6, e16, m2, ta, ma
|
||||
add t0, a0, a1
|
||||
vle8.v v8, (a0)
|
||||
addi a2, a2, -2
|
||||
vle8.v v9, (t0)
|
||||
vzext.vf2 v24, v8
|
||||
vzext.vf2 v26, v9
|
||||
vmul.vx v16, v24, a4
|
||||
vmul.vx v18, v26, a4
|
||||
vsadd.vx v16, v16, a5
|
||||
vmax.vx v16, v16, zero
|
||||
vsetvli zero, zero, e8, m1, ta, ma
|
||||
vsadd.vx v18, v18, a5
|
||||
vmax.vx v16, v16, zero
|
||||
vmax.vx v18, v18, zero
|
||||
vsetvli zero, zero, e8, m1, ta, ma
|
||||
vnclipu.wx v8, v16, a3
|
||||
vse8.v v8, (a0)
|
||||
vnclipu.wx v9, v18, a3
|
||||
vse8.v v8, (a0)
|
||||
vse8.v v9, (t0)
|
||||
sh1add a0, a1, a0
|
||||
add a0, a0, a1
|
||||
bnez a2, 1b
|
||||
|
||||
@ -76,103 +86,77 @@ func ff_h264_biweight_pixels_simple_8_rvv, zve32x
|
||||
ret
|
||||
endfunc
|
||||
|
||||
func ff_h264_weight_pixels_8_rvv, zve32x
|
||||
.macro h264_weight depth, w, b=
|
||||
func ff_h264_weight_pixels\w\()_\depth\()_rvv, zve64x
|
||||
lpad 0
|
||||
.ifb \b
|
||||
li t6, \w
|
||||
j ff_h264_weight_pixels_simple_\depth\()_rvv
|
||||
.else
|
||||
csrwi vxrm, 0
|
||||
sll a5, a5, a3
|
||||
1:
|
||||
mv t0, a0
|
||||
mv t6, a6
|
||||
2:
|
||||
vsetvli t2, a2, e16, m8, ta, ma
|
||||
vlsseg2e8.v v0, (t0), a1
|
||||
addi t6, t6, -2
|
||||
vzext.vf2 v16, v0
|
||||
vzext.vf2 v24, v4
|
||||
vmul.vx v16, v16, a4
|
||||
vmul.vx v24, v24, a4
|
||||
vsetvli t1, a2, e\b, m2, ta, ma
|
||||
vlse\b\().v v8, (a0), a1
|
||||
vsetvli t0, zero, e16, m4, ta, ma
|
||||
vzext.vf2 v24, v8
|
||||
sub a2, a2, t1
|
||||
vmul.vx v16, v24, a4
|
||||
mul t2, t1, a1
|
||||
vsadd.vx v16, v16, a5
|
||||
vsadd.vx v24, v24, a5
|
||||
vmax.vx v16, v16, zero
|
||||
vmax.vx v24, v24, zero
|
||||
vsetvli zero, zero, e8, m4, ta, ma
|
||||
vnclipu.wx v0, v16, a3
|
||||
vnclipu.wx v4, v24, a3
|
||||
vssseg2e8.v v0, (t0), a1
|
||||
addi t0, t0, 2
|
||||
bnez t6, 2b
|
||||
|
||||
mul t3, a1, t2
|
||||
sub a2, a2, t2
|
||||
add a0, a0, t3
|
||||
vsetvli zero, zero, e8, m2, ta, ma
|
||||
vnclipu.wx v8, v16, a3
|
||||
vsetvli zero, t1, e\b, m2, ta, ma
|
||||
vsse\b\().v v8, (a0), a1
|
||||
add a0, a0, t2
|
||||
bnez a2, 1b
|
||||
|
||||
ret
|
||||
.endif
|
||||
endfunc
|
||||
|
||||
.variant_cc ff_h264_biweight_pixels_8_rvv
|
||||
func ff_h264_biweight_pixels_8_rvv, zve32x
|
||||
func ff_h264_biweight_pixels\w\()_\depth\()_rvv, zve64x
|
||||
lpad 0
|
||||
li t6, \w
|
||||
.ifb \b
|
||||
j ff_h264_biweight_pixels_simple_8_rvv
|
||||
.else
|
||||
csrwi vxrm, 2
|
||||
addi a7, a7, 1
|
||||
ori a7, a7, 1
|
||||
sll a7, a7, a4
|
||||
addi a4, a4, 1
|
||||
1:
|
||||
mv t0, a0
|
||||
mv t1, a1
|
||||
mv t5, t6
|
||||
2:
|
||||
vsetvli t2, a3, e16, m8, ta, ma
|
||||
vlsseg2e8.v v0, (t0), a2
|
||||
vlsseg2e8.v v8, (t1), a2
|
||||
addi t5, t5, -2
|
||||
vmv.v.x v16, a7
|
||||
vmv.v.x v24, a7
|
||||
vsetvli zero, zero, e8, m4, ta, ma
|
||||
vwmaccsu.vx v16, a5, v0
|
||||
vwmaccsu.vx v24, a5, v4
|
||||
vwmaccsu.vx v16, a6, v8
|
||||
vwmaccsu.vx v24, a6, v12
|
||||
vsetvli zero, zero, e16, m8, ta, ma
|
||||
vsetvli t1, a3, e\b, m2, ta, ma
|
||||
vlse\b\().v v8, (a0), a2
|
||||
sub a3, a3, t1
|
||||
vlse\b\().v v12, (a1), a2
|
||||
mul t2, t1, a2
|
||||
vsetvli t0, zero, e16, m4, ta, ma
|
||||
vmv.v.x v16, a7
|
||||
vsetvli zero, zero, e8, m2, ta, ma
|
||||
vwmaccsu.vx v16, a5, v8
|
||||
add a1, a1, t2
|
||||
vwmaccsu.vx v16, a6, v12
|
||||
vsetvli zero, zero, e16, m4, ta, ma
|
||||
vmax.vx v16, v16, zero
|
||||
vmax.vx v24, v24, zero
|
||||
vsetvli zero, zero, e8, m4, ta, ma
|
||||
vnclipu.wx v0, v16, a4
|
||||
vnclipu.wx v4, v24, a4
|
||||
vssseg2e8.v v0, (t0), a2
|
||||
addi t0, t0, 2
|
||||
addi t1, t1, 2
|
||||
bnez t5, 2b
|
||||
|
||||
mul t3, a2, t2
|
||||
sub a3, a3, t2
|
||||
add a0, a0, t3
|
||||
add a1, a1, t3
|
||||
vsetvli zero, zero, e8, m2, ta, ma
|
||||
vnclipu.wx v8, v16, a4
|
||||
vsetvli zero, t1, e\b, m2, ta, ma
|
||||
vsse\b\().v v8, (a0), a2
|
||||
add a0, a0, t2
|
||||
.endif
|
||||
bnez a3, 1b
|
||||
|
||||
ret
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
.irp w, 16, 8, 4, 2
|
||||
func ff_h264_weight_pixels\w\()_8_rvv, zve32x
|
||||
lpad 0
|
||||
li a6, \w
|
||||
.if \w == 16
|
||||
j ff_h264_weight_pixels_simple_8_rvv
|
||||
.else
|
||||
j ff_h264_weight_pixels_8_rvv
|
||||
.endif
|
||||
endfunc
|
||||
|
||||
func ff_h264_biweight_pixels\w\()_8_rvv, zve32x
|
||||
lpad 0
|
||||
li t6, \w
|
||||
.if \w == 16
|
||||
j ff_h264_biweight_pixels_simple_8_rvv
|
||||
.else
|
||||
j ff_h264_biweight_pixels_8_rvv
|
||||
.endif
|
||||
endfunc
|
||||
.endr
|
||||
h264_weight 8, 2, 16
|
||||
h264_weight 8, 4, 32
|
||||
h264_weight 8, 8, 64
|
||||
h264_weight 8, 16
|
||||
|
||||
.global ff_h264_weight_funcs_8_rvv
|
||||
.hidden ff_h264_weight_funcs_8_rvv
|
||||
|
@ -0,0 +1,113 @@
|
||||
/*
|
||||
* RISC-V optimised DSP functions
|
||||
* Copyright (c) 2024 Niklas Haas
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/riscv/cpu.h"
|
||||
#include "libavcodec/h264qpel.h"
|
||||
|
||||
#define DECL_QPEL_OPS(OP, SIZE, EXT) \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc00_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc10_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc20_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc30_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc01_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc11_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc21_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc31_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc02_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc12_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc22_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc32_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc03_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc13_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc23_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride); \
|
||||
void ff_ ## OP ## _h264_qpel ## SIZE ## _mc33_ ## EXT(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
|
||||
|
||||
DECL_QPEL_OPS(put, 16, rvv256)
|
||||
DECL_QPEL_OPS(put, 8, rvv256)
|
||||
// DECL_QPEL_OPS(put, 4, rvv256)
|
||||
|
||||
DECL_QPEL_OPS(avg, 16, rvv256)
|
||||
DECL_QPEL_OPS(avg, 8, rvv256)
|
||||
// DECL_QPEL_OPS(avg, 4, rvv256)
|
||||
|
||||
DECL_QPEL_OPS(put, 16, rvv)
|
||||
DECL_QPEL_OPS(put, 8, rvv)
|
||||
DECL_QPEL_OPS(put, 4, rvv)
|
||||
|
||||
DECL_QPEL_OPS(avg, 16, rvv)
|
||||
DECL_QPEL_OPS(avg, 8, rvv)
|
||||
DECL_QPEL_OPS(avg, 4, rvv)
|
||||
|
||||
#define SET_QPEL_FNS(OP, IDX, SIZE, EXT) \
|
||||
do { \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 0] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc00_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 1] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc10_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 2] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc20_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 3] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc30_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 4] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc01_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 5] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc11_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 6] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc21_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 7] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc31_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 8] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc02_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][ 9] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc12_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][10] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc22_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][11] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc32_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][12] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc03_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][13] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc13_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][14] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc23_ ## EXT; \
|
||||
c->OP ## _h264_qpel_pixels_tab[IDX][15] = ff_ ## OP ## _h264_qpel ## SIZE ## _mc33_ ## EXT; \
|
||||
} while (0)
|
||||
|
||||
av_cold void ff_h264qpel_init_riscv(H264QpelContext *c, int bit_depth)
|
||||
{
|
||||
#if HAVE_RVV
|
||||
int flags = av_get_cpu_flags();
|
||||
if (flags & AV_CPU_FLAG_RVV_I32) {
|
||||
const int vlen = 8 * ff_get_rv_vlenb();
|
||||
|
||||
switch (bit_depth) {
|
||||
case 8:
|
||||
if (vlen >= 256) {
|
||||
SET_QPEL_FNS(put, 0, 16, rvv256);
|
||||
SET_QPEL_FNS(put, 1, 8, rvv256);
|
||||
SET_QPEL_FNS(put, 2, 4, rvv);
|
||||
|
||||
SET_QPEL_FNS(avg, 0, 16, rvv256);
|
||||
SET_QPEL_FNS(avg, 1, 8, rvv256);
|
||||
SET_QPEL_FNS(avg, 2, 4, rvv);
|
||||
} else if (vlen >= 128) {
|
||||
SET_QPEL_FNS(put, 0, 16, rvv);
|
||||
SET_QPEL_FNS(put, 1, 8, rvv);
|
||||
SET_QPEL_FNS(put, 2, 4, rvv);
|
||||
|
||||
SET_QPEL_FNS(avg, 0, 16, rvv);
|
||||
SET_QPEL_FNS(avg, 1, 8, rvv);
|
||||
SET_QPEL_FNS(avg, 2, 4, rvv);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
@ -0,0 +1,464 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*
|
||||
* Copyright (c) 2024 Niklas Haas
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "libavutil/riscv/asm.S"
|
||||
|
||||
.macro lx rd, addr
|
||||
#if (__riscv_xlen == 32)
|
||||
lw \rd, \addr
|
||||
#elif (__riscv_xlen == 64)
|
||||
ld \rd, \addr
|
||||
#else
|
||||
lq \rd, \addr
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro sx rd, addr
|
||||
#if (__riscv_xlen == 32)
|
||||
sw \rd, \addr
|
||||
#elif (__riscv_xlen == 64)
|
||||
sd \rd, \addr
|
||||
#else
|
||||
sq \rd, \addr
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/* output is unclipped; clobbers v26-v31 plus t4 and t5 */
|
||||
.macro lowpass_h vdst, src
|
||||
addi t4, \src, 3
|
||||
lbu t5, 2(\src)
|
||||
vle8.v v31, (t4)
|
||||
lbu t4, 1(\src)
|
||||
vslide1up.vx v30, v31, t5
|
||||
lbu t5, 0(\src)
|
||||
vslide1up.vx v29, v30, t4
|
||||
lbu t4, -1(\src)
|
||||
vslide1up.vx v28, v29, t5
|
||||
lbu t5, -2(\src)
|
||||
vslide1up.vx v27, v28, t4
|
||||
vslide1up.vx v26, v27, t5
|
||||
vwaddu.vv \vdst, v26, v31
|
||||
vwmaccu.vx \vdst, t6, v28
|
||||
vwmaccu.vx \vdst, t6, v29
|
||||
vwmaccsu.vx \vdst, a7, v27
|
||||
vwmaccsu.vx \vdst, a7, v30
|
||||
.endm
|
||||
|
||||
/* output is unclipped */
|
||||
.macro lowpass_v vdst, vsrc0, vsrc1, vsrc2, vsrc3, vsrc4, vsrc5, signed=0
|
||||
.if \signed
|
||||
vwadd.vv \vdst, \vsrc0, \vsrc5
|
||||
vwmacc.vx \vdst, t6, \vsrc2
|
||||
vwmacc.vx \vdst, t6, \vsrc3
|
||||
vwmacc.vx \vdst, a7, \vsrc1
|
||||
vwmacc.vx \vdst, a7, \vsrc4
|
||||
.else
|
||||
vwaddu.vv \vdst, \vsrc0, \vsrc5
|
||||
vwmaccu.vx \vdst, t6, \vsrc2
|
||||
vwmaccu.vx \vdst, t6, \vsrc3
|
||||
vwmaccsu.vx \vdst, a7, \vsrc1
|
||||
vwmaccsu.vx \vdst, a7, \vsrc4
|
||||
.endif
|
||||
.endm
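Both macros implement the standard H.264 six-tap (1, -5, 20, 20, -5, 1) half-sample filter; the 20 and -5 constants arrive in t6 and a7, set up by the qpel entry points further below. For comparison, a scalar C reference of the unclipped tap (hypothetical helper, not part of the patch):

#include <stdint.h>

/* Unclipped 6-tap H.264 half-pel filter, matching lowpass_h's arithmetic. */
static inline int h264_lowpass6(const uint8_t *src)
{
    return src[-2] - 5 * src[-1] + 20 * src[0]
         + 20 * src[1] - 5 * src[2] + src[3];
}

The final pixel is then (value + 16) >> 5 clipped to 0..255, which is what the vmax.vx/vnclipu.wi ..., 5 pairs perform with round-to-nearest-up rounding (vxrm = 0).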
|
||||
|
||||
.macro qpel_mc00 op, dst, src, stride, size
|
||||
func ff_\op\()_h264_qpel_pixels, zve32x
|
||||
1: add t1, a2, a1
|
||||
add t2, a2, t1
|
||||
add t3, a2, t2
|
||||
vle8.v v0, (a1)
|
||||
vle8.v v1, (t1)
|
||||
vle8.v v2, (t2)
|
||||
vle8.v v3, (t3)
|
||||
addi a4, a4, -4
|
||||
add a1, a2, t3
|
||||
add t1, a2, a0
|
||||
add t2, a2, t1
|
||||
add t3, a2, t2
|
||||
.ifc \op, avg
|
||||
vle8.v v4, (a0)
|
||||
vle8.v v5, (t1)
|
||||
vle8.v v6, (t2)
|
||||
vle8.v v7, (t3)
|
||||
vaaddu.vv v0, v0, v4
|
||||
vaaddu.vv v1, v1, v5
|
||||
vaaddu.vv v2, v2, v6
|
||||
vaaddu.vv v3, v3, v7
|
||||
.endif
|
||||
vse8.v v0, (a0)
|
||||
vse8.v v1, (t1)
|
||||
vse8.v v2, (t2)
|
||||
vse8.v v3, (t3)
|
||||
add a0, a2, t3
|
||||
bnez a4, 1b
|
||||
jr t0
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
qpel_mc00 put, a0, a1, a2, a4
|
||||
qpel_mc00 avg, a0, a1, a2, a4
|
||||
|
||||
.macro qpel_lowpass op, ext, lmul, lmul2
|
||||
func ff_\op\()_h264_qpel_h_lowpass_\lmul\ext, zve32x
|
||||
1: add t1, a3, a1
|
||||
add t2, a3, t1
|
||||
add t3, a3, t2
|
||||
lowpass_h v0, a1
|
||||
lowpass_h v2, t1
|
||||
lowpass_h v4, t2
|
||||
lowpass_h v6, t3
|
||||
add a1, a3, t3
|
||||
addi a4, a4, -4
|
||||
vsetvli zero, zero, e16, \lmul2, ta, ma
|
||||
vmax.vx v0, v0, zero
|
||||
vmax.vx v2, v2, zero
|
||||
vmax.vx v4, v4, zero
|
||||
vmax.vx v6, v6, zero
|
||||
vsetvli zero, zero, e8, \lmul, ta, ma
|
||||
vnclipu.wi v0, v0, 5
|
||||
vnclipu.wi v2, v2, 5
|
||||
vnclipu.wi v4, v4, 5
|
||||
vnclipu.wi v6, v6, 5
|
||||
.ifc \ext, _l2
|
||||
add t1, a6, a5
|
||||
add t2, a6, t1
|
||||
add t3, a6, t2
|
||||
vle8.v v8, (a5)
|
||||
vle8.v v10, (t1)
|
||||
vle8.v v12, (t2)
|
||||
vle8.v v14, (t3)
|
||||
add a5, a2, t3
|
||||
vaaddu.vv v0, v0, v8
|
||||
vaaddu.vv v2, v2, v10
|
||||
vaaddu.vv v4, v4, v12
|
||||
vaaddu.vv v6, v6, v14
|
||||
.endif
|
||||
add t1, a2, a0
|
||||
add t2, a2, t1
|
||||
add t3, a2, t2
|
||||
.ifc \op, avg
|
||||
vle8.v v1, (a0)
|
||||
vle8.v v3, (t1)
|
||||
vle8.v v5, (t2)
|
||||
vle8.v v7, (t3)
|
||||
vaaddu.vv v0, v0, v1
|
||||
vaaddu.vv v2, v2, v3
|
||||
vaaddu.vv v4, v4, v5
|
||||
vaaddu.vv v6, v6, v7
|
||||
.endif
|
||||
vse8.v v0, (a0)
|
||||
vse8.v v2, (t1)
|
||||
vse8.v v4, (t2)
|
||||
vse8.v v6, (t3)
|
||||
add a0, a2, t3
|
||||
bnez a4, 1b
|
||||
jr t0
|
||||
endfunc
|
||||
|
||||
func ff_\op\()_h264_qpel_v_lowpass_\lmul\ext, zve32x
|
||||
sub t1, a1, a3
|
||||
sub t2, t1, a3
|
||||
vle8.v v2, (a1)
|
||||
vle8.v v1, (t1)
|
||||
vle8.v v0, (t2)
|
||||
add t1, a1, a3
|
||||
add t2, t1, a3
|
||||
add a1, t2, a3
|
||||
vle8.v v3, (t1)
|
||||
vle8.v v4, (t2)
|
||||
1: add t1, a3, a1
|
||||
add t2, a3, t1
|
||||
add t3, a3, t2
|
||||
vle8.v v5, (a1)
|
||||
vle8.v v6, (t1)
|
||||
vle8.v v7, (t2)
|
||||
vle8.v v8, (t3)
|
||||
add a1, a3, t3
|
||||
lowpass_v v24, v0, v1, v2, v3, v4, v5
|
||||
lowpass_v v26, v1, v2, v3, v4, v5, v6
|
||||
lowpass_v v28, v2, v3, v4, v5, v6, v7
|
||||
lowpass_v v30, v3, v4, v5, v6, v7, v8
|
||||
addi a4, a4, -4
|
||||
vsetvli zero, zero, e16, \lmul2, ta, ma
|
||||
vmax.vx v24, v24, zero
|
||||
vmax.vx v26, v26, zero
|
||||
vmax.vx v28, v28, zero
|
||||
vmax.vx v30, v30, zero
|
||||
vsetvli zero, zero, e8, \lmul, ta, ma
|
||||
vnclipu.wi v24, v24, 5
|
||||
vnclipu.wi v26, v26, 5
|
||||
vnclipu.wi v28, v28, 5
|
||||
vnclipu.wi v30, v30, 5
|
||||
.ifc \ext, _l2
|
||||
add t1, a6, a5
|
||||
add t2, a6, t1
|
||||
add t3, a6, t2
|
||||
vle8.v v9, (a5)
|
||||
vle8.v v10, (t1)
|
||||
vle8.v v11, (t2)
|
||||
vle8.v v12, (t3)
|
||||
add a5, a6, t3
|
||||
vaaddu.vv v24, v24, v9
|
||||
vaaddu.vv v26, v26, v10
|
||||
vaaddu.vv v28, v28, v11
|
||||
vaaddu.vv v30, v30, v12
|
||||
.endif
|
||||
add t1, a2, a0
|
||||
add t2, a2, t1
|
||||
add t3, a2, t2
|
||||
.ifc \op, avg
|
||||
vle8.v v9, (a0)
|
||||
vle8.v v10, (t1)
|
||||
vle8.v v11, (t2)
|
||||
vle8.v v12, (t3)
|
||||
vaaddu.vv v24, v24, v9
|
||||
vaaddu.vv v26, v26, v10
|
||||
vaaddu.vv v28, v28, v11
|
||||
vaaddu.vv v30, v30, v12
|
||||
.endif
|
||||
vse8.v v24, (a0)
|
||||
vse8.v v26, (t1)
|
||||
vse8.v v28, (t2)
|
||||
vse8.v v30, (t3)
|
||||
add a0, a2, t3
|
||||
vmv.v.v v0, v4
|
||||
vmv.v.v v1, v5
|
||||
vmv.v.v v2, v6
|
||||
vmv.v.v v3, v7
|
||||
vmv.v.v v4, v8
|
||||
bnez a4, 1b
|
||||
jr t0
|
||||
endfunc
|
||||
|
||||
func ff_\op\()_h264_qpel_hv_lowpass_\lmul\ext, zve32x
|
||||
sub t1, a1, a3
|
||||
sub t2, t1, a3
|
||||
lowpass_h v4, a1
|
||||
lowpass_h v2, t1
|
||||
lowpass_h v0, t2
|
||||
add t1, a1, a3
|
||||
add t2, t1, a3
|
||||
add a1, t2, a3
|
||||
lowpass_h v6, t1
|
||||
lowpass_h v8, t2
|
||||
1: add t1, a3, a1
|
||||
add t2, a3, t1
|
||||
add t3, a3, t2
|
||||
lowpass_h v10, a1
|
||||
lowpass_h v12, t1
|
||||
lowpass_h v14, t2
|
||||
lowpass_h v16, t3
|
||||
vsetvli zero, zero, e16, \lmul2, ta, ma
|
||||
addi a4, a4, -4
|
||||
lowpass_v v20, v0, v2, v4, v6, v8, v10, signed=1
|
||||
lowpass_v v24, v2, v4, v6, v8, v10, v12, signed=1
|
||||
lowpass_v v28, v4, v6, v8, v10, v12, v14, signed=1
|
||||
vnclip.wi v0, v20, 10
|
||||
lowpass_v v20, v6, v8, v10, v12, v14, v16, signed=1
|
||||
vnclip.wi v2, v24, 10
|
||||
vnclip.wi v4, v28, 10
|
||||
vnclip.wi v6, v20, 10
|
||||
vmax.vx v18, v0, zero
|
||||
vmax.vx v20, v2, zero
|
||||
vmax.vx v22, v4, zero
|
||||
vmax.vx v24, v6, zero
|
||||
vmv.v.v v0, v8
|
||||
vmv.v.v v2, v10
|
||||
vmv.v.v v4, v12
|
||||
vmv.v.v v6, v14
|
||||
vmv.v.v v8, v16
|
||||
add a1, a3, t3
|
||||
vsetvli zero, zero, e8, \lmul, ta, ma
|
||||
vnclipu.wi v18, v18, 0
|
||||
vnclipu.wi v20, v20, 0
|
||||
vnclipu.wi v22, v22, 0
|
||||
vnclipu.wi v24, v24, 0
|
||||
.ifc \ext, _l2
|
||||
add t1, a6, a5
|
||||
add t2, a6, t1
|
||||
add t3, a6, t2
|
||||
vle8.v v26, (a5)
|
||||
vle8.v v27, (t1)
|
||||
vle8.v v28, (t2)
|
||||
vle8.v v29, (t3)
|
||||
add a5, a6, t3
|
||||
vaaddu.vv v18, v18, v26
|
||||
vaaddu.vv v20, v20, v27
|
||||
vaaddu.vv v22, v22, v28
|
||||
vaaddu.vv v24, v24, v29
|
||||
.endif
|
||||
add t1, a2, a0
|
||||
add t2, a2, t1
|
||||
add t3, a2, t2
|
||||
.ifc \op, avg
|
||||
vle8.v v26, (a0)
|
||||
vle8.v v27, (t1)
|
||||
vle8.v v28, (t2)
|
||||
vle8.v v29, (t3)
|
||||
vaaddu.vv v18, v18, v26
|
||||
vaaddu.vv v20, v20, v27
|
||||
vaaddu.vv v22, v22, v28
|
||||
vaaddu.vv v24, v24, v29
|
||||
.endif
|
||||
vse8.v v18, (a0)
|
||||
vse8.v v20, (t1)
|
||||
vse8.v v22, (t2)
|
||||
vse8.v v24, (t3)
|
||||
add a0, a2, t3
|
||||
bnez a4, 1b
|
||||
jr t0
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
/* Note: We could possibly specialize for the width 8 / width 4 cases by
|
||||
loading 32 bit integers, but this makes the convolutions more complicated
|
||||
to implement, so it's not necessarily any faster. */
|
||||
|
||||
.macro h264_qpel lmul, lmul2
|
||||
qpel_lowpass put, , \lmul, \lmul2
|
||||
qpel_lowpass put, _l2, \lmul, \lmul2
|
||||
qpel_lowpass avg, , \lmul, \lmul2
|
||||
qpel_lowpass avg, _l2, \lmul, \lmul2
|
||||
.endm
|
||||
|
||||
h264_qpel m1, m2
|
||||
h264_qpel mf2, m1
|
||||
h264_qpel mf4, mf2
|
||||
h264_qpel mf8, mf4
|
||||
|
||||
.macro h264_qpel_1pass op, case, lmul, size, ext=rvv, dir, offset
|
||||
func ff_\op\()_h264_qpel\size\()_\case\()_\ext, zve32x
|
||||
lpad 0
|
||||
vsetivli zero, \size, e8, \lmul, ta, ma
|
||||
csrwi vxrm, 0
|
||||
li a4, \size
|
||||
li t6, 20
|
||||
li a7, -5
|
||||
mv a3, a2
|
||||
mv t0, ra
|
||||
.ifnb \offset
|
||||
.ifc \dir, v
|
||||
add a5, a1, \offset
|
||||
.else
|
||||
addi a5, a1, \offset
|
||||
.endif
|
||||
mv a6, a3
|
||||
j ff_\op\()_h264_qpel_\dir\()_lowpass_\lmul\()_l2
|
||||
.else
|
||||
j ff_\op\()_h264_qpel_\dir\()_lowpass_\lmul\()
|
||||
.endif
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
.macro h264_qpel_2pass op, case, lmul, size, ext=rvv, dir1, dir2, off1=0, off2
|
||||
func ff_\op\()_h264_qpel\size\()_\case\()_\ext, zve32x
|
||||
lpad 0
|
||||
vsetivli zero, \size, e8, \lmul, ta, ma
|
||||
csrwi vxrm, 0
|
||||
addi sp, sp, (-(__riscv_xlen >> 2))
|
||||
li a4, \size
|
||||
li t6, 20
|
||||
li a7, -5
|
||||
sx a0, 0(sp)
|
||||
sx a1, (__riscv_xlen >> 3)(sp)
|
||||
.ifc \off1, a2
|
||||
add a1, a1, \off1
|
||||
.elseif \off1
|
||||
addi a1, a1, \off1
|
||||
.endif
|
||||
mv a3, a2
|
||||
.ifc \op, avg
|
||||
// Use temporary array on stack for the first pass
|
||||
addi a0, sp, -(\size * \size)
|
||||
li a2, \size
|
||||
.endif
|
||||
jal t0, ff_put_h264_qpel_\dir1\()_lowpass_\lmul
|
||||
lx a0, 0(sp)
|
||||
lx a1, (__riscv_xlen >> 3)(sp)
|
||||
.ifc \op, put
|
||||
// Directly reuse the first pass output buffer
|
||||
mv a5, a0
|
||||
mv a6, a2
|
||||
.else
|
||||
addi a5, sp, -(\size * \size)
|
||||
li a6, \size
|
||||
mv a2, a3
|
||||
.endif
|
||||
.ifnb \off2
|
||||
addi a1, a1, \off2
|
||||
.endif
|
||||
li a4, \size
|
||||
mv t0, ra
|
||||
addi sp, sp, 16
|
||||
j ff_\op\()_h264_qpel_\dir2\()_lowpass_\lmul\()_l2
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
.macro ff_h264_qpel_fns op, lmul, size, ext=rvv
|
||||
func ff_\op\()_h264_qpel\size\()_mc00_\ext, zve32x
|
||||
lpad 0
|
||||
vsetivli zero, \size, e8, \lmul, ta, ma
|
||||
csrwi vxrm, 0
|
||||
li a4, \size
|
||||
mv t0, ra
|
||||
j ff_\op\()_h264_qpel_pixels
|
||||
endfunc
|
||||
|
||||
h264_qpel_1pass \op, mc20, \lmul, \size, \ext, h
|
||||
h264_qpel_1pass \op, mc02, \lmul, \size, \ext, v
|
||||
h264_qpel_1pass \op, mc10, \lmul, \size, \ext, h, 0
|
||||
h264_qpel_1pass \op, mc30, \lmul, \size, \ext, h, 1
|
||||
h264_qpel_1pass \op, mc01, \lmul, \size, \ext, v, zero
|
||||
h264_qpel_1pass \op, mc03, \lmul, \size, \ext, v, a2
|
||||
h264_qpel_1pass \op, mc22, \lmul, \size, \ext, hv
|
||||
|
||||
h264_qpel_2pass \op, mc11, \lmul, \size, \ext, h, v
|
||||
h264_qpel_2pass \op, mc21, \lmul, \size, \ext, h, hv
|
||||
h264_qpel_2pass \op, mc12, \lmul, \size, \ext, v, hv
|
||||
h264_qpel_2pass \op, mc31, \lmul, \size, \ext, h, v, off2=1
|
||||
h264_qpel_2pass \op, mc13, \lmul, \size, \ext, h, v, a2
|
||||
h264_qpel_2pass \op, mc33, \lmul, \size, \ext, h, v, a2, 1
|
||||
h264_qpel_2pass \op, mc23, \lmul, \size, \ext, h, hv, a2
|
||||
h264_qpel_2pass \op, mc32, \lmul, \size, \ext, v, hv, 1
|
||||
.endm
|
||||
|
||||
ff_h264_qpel_fns put, mf2, 16, rvv256
|
||||
ff_h264_qpel_fns put, mf4, 8, rvv256
|
||||
/* ff_h264_qpel_fns put, mf8, 4, rvv256 */
|
||||
|
||||
ff_h264_qpel_fns avg, mf2, 16, rvv256
|
||||
ff_h264_qpel_fns avg, mf4, 8, rvv256
|
||||
/* ff_h264_qpel_fns avg, mf8, 4, rvv256 */
|
||||
|
||||
ff_h264_qpel_fns put, m1, 16, rvv
|
||||
ff_h264_qpel_fns put, mf2, 8, rvv
|
||||
ff_h264_qpel_fns put, mf4, 4, rvv
|
||||
|
||||
ff_h264_qpel_fns avg, m1, 16, rvv
|
||||
ff_h264_qpel_fns avg, mf2, 8, rvv
|
||||
ff_h264_qpel_fns avg, mf4, 4, rvv
|
@ -0,0 +1,2 @@
|
||||
OBJS-$(CONFIG_VVC_DECODER) += riscv/vvc/vvcdsp_init.o
|
||||
RVV-OBJS-$(CONFIG_VVC_DECODER) += riscv/vvc/vvc_mc_rvv.o
|
@ -0,0 +1,287 @@
|
||||
/*
|
||||
* Copyright (c) 2024 Institute of Software Chinese Academy of Sciences (ISCAS).
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/riscv/asm.S"
|
||||
|
||||
.macro vsetvlstatic8 w, vlen
|
||||
.if \w == 2 && \vlen == 128
|
||||
vsetivli zero, \w, e8, mf8, ta, ma
|
||||
.elseif \w == 4 && \vlen == 128
|
||||
vsetivli zero, \w, e8, mf4, ta, ma
|
||||
.elseif \w == 8 && \vlen == 128
|
||||
vsetivli zero, \w, e8, mf2, ta, ma
|
||||
.elseif \w == 16 && \vlen == 128
|
||||
vsetivli zero, \w, e8, m1, ta, ma
|
||||
.elseif \w == 32 && \vlen == 128
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e8, m2, ta, ma
|
||||
.elseif \w <= 4 && \vlen == 256
|
||||
vsetivli zero, \w, e8, mf8, ta, ma
|
||||
.elseif \w == 8 && \vlen == 256
|
||||
vsetivli zero, \w, e8, mf4, ta, ma
|
||||
.elseif \w == 16 && \vlen == 256
|
||||
vsetivli zero, \w, e8, mf2, ta, ma
|
||||
.elseif \w == 32 && \vlen == 256
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e8, m1, ta, ma
|
||||
.elseif \w == 64 && \vlen == 256
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e8, m2, ta, ma
|
||||
.else
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e8, m4, ta, ma
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro vsetvlstatic16 w, vlen
|
||||
.if \w == 2 && \vlen == 128
|
||||
vsetivli zero, \w, e16, mf4, ta, ma
|
||||
.elseif \w == 4 && \vlen == 128
|
||||
vsetivli zero, \w, e16, mf2, ta, ma
|
||||
.elseif \w == 8 && \vlen == 128
|
||||
vsetivli zero, \w, e16, m1, ta, ma
|
||||
.elseif \w == 16 && \vlen == 128
|
||||
vsetivli zero, \w, e16, m2, ta, ma
|
||||
.elseif \w == 32 && \vlen == 128
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e16, m4, ta, ma
|
||||
.elseif \w <= 4 && \vlen == 256
|
||||
vsetivli zero, \w, e16, mf4, ta, ma
|
||||
.elseif \w == 8 && \vlen == 256
|
||||
vsetivli zero, \w, e16, mf2, ta, ma
|
||||
.elseif \w == 16 && \vlen == 256
|
||||
vsetivli zero, \w, e16, m1, ta, ma
|
||||
.elseif \w == 32 && \vlen == 256
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e16, m2, ta, ma
|
||||
.elseif \w == 64 && \vlen == 256
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e16, m4, ta, ma
|
||||
.else
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e16, m8, ta, ma
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro vsetvlstatic32 w, vlen
|
||||
.if \w == 2
|
||||
vsetivli zero, \w, e32, mf2, ta, ma
|
||||
.elseif \w == 4 && \vlen == 128
|
||||
vsetivli zero, \w, e32, m1, ta, ma
|
||||
.elseif \w == 8 && \vlen == 128
|
||||
vsetivli zero, \w, e32, m2, ta, ma
|
||||
.elseif \w == 16 && \vlen == 128
|
||||
vsetivli zero, \w, e32, m4, ta, ma
|
||||
.elseif \w == 4 && \vlen == 256
|
||||
vsetivli zero, \w, e32, mf2, ta, ma
|
||||
.elseif \w == 8 && \vlen == 256
|
||||
vsetivli zero, \w, e32, m1, ta, ma
|
||||
.elseif \w == 16 && \vlen == 256
|
||||
vsetivli zero, \w, e32, m2, ta, ma
|
||||
.elseif \w == 32 && \vlen == 256
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e32, m4, ta, ma
|
||||
.else
|
||||
li t0, \w
|
||||
vsetvli zero, t0, e32, m8, ta, ma
|
||||
.endif
|
||||
.endm
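All three macros follow the same rule: pick the smallest register group (LMUL) that holds w elements of the chosen element width at the given VLEN, i.e. LMUL >= w * SEW / VLEN, falling back to a large LMUL for the widest cases. A tiny C model of that selection, for illustration only:

/* Returns the LMUL needed to hold w elements of sew bits at vector length
 * vlen, expressed in eighths (1 == mf8, 2 == mf4, 4 == mf2, 8 == m1, ...). */
static int needed_lmul_eighths(int w, int sew, int vlen)
{
    int eighths = (w * sew * 8 + vlen - 1) / vlen;   /* ceil(w*sew/vlen) * 8 */
    return eighths < 1 ? 1 : eighths;
}

For example, w = 8 with e8 and VLEN = 128 gives 4 (mf2), matching the branch above.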
|
||||
|
||||
.macro avg w, vlen, id
|
||||
\id\w\vlen:
|
||||
.if \w < 128
|
||||
vsetvlstatic16 \w, \vlen
|
||||
addi t0, a2, 128*2
|
||||
addi t1, a3, 128*2
|
||||
add t2, a0, a1
|
||||
vle16.v v0, (a2)
|
||||
vle16.v v8, (a3)
|
||||
addi a5, a5, -2
|
||||
vle16.v v16, (t0)
|
||||
vle16.v v24, (t1)
|
||||
vadd.vv v8, v8, v0
|
||||
vadd.vv v24, v24, v16
|
||||
vmax.vx v8, v8, zero
|
||||
vmax.vx v24, v24, zero
|
||||
vsetvlstatic8 \w, \vlen
|
||||
addi a2, a2, 128*4
|
||||
vnclipu.wi v8, v8, 7
|
||||
vnclipu.wi v24, v24, 7
|
||||
addi a3, a3, 128*4
|
||||
vse8.v v8, (a0)
|
||||
vse8.v v24, (t2)
|
||||
sh1add a0, a1, a0
|
||||
.else
|
||||
addi a5, a5, -1
|
||||
mv t1, a0
|
||||
mv t2, a2
|
||||
mv t3, a3
|
||||
mv t4, a4
|
||||
1:
|
||||
vsetvli t0, a4, e16, m8, ta, ma
|
||||
sub a4, a4, t0
|
||||
vle16.v v0, (a2)
|
||||
vle16.v v8, (a3)
|
||||
vadd.vv v8, v8, v0
|
||||
vmax.vx v8, v8, zero
|
||||
vsetvli zero, zero, e8, m4, ta, ma
|
||||
vnclipu.wi v8, v8, 7
|
||||
vse8.v v8, (a0)
|
||||
sh1add a2, t0, a2
|
||||
sh1add a3, t0, a3
|
||||
add a0, a0, t0
|
||||
bnez a4, 1b
|
||||
add a0, t1, a1
|
||||
addi a2, t2, 128*2
|
||||
addi a3, t3, 128*2
|
||||
mv a4, t4
|
||||
.endif
|
||||
bnez a5, \id\w\vlen\()b
|
||||
ret
|
||||
.endm
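For the 8-bit path this is the usual VVC bi-prediction average of the two 16-bit intermediate buffers: sum, round, shift right by 7 and clip to 8 bits (vnclipu.wi ..., 7 with vxrm = 0 supplies the +64 rounding term). A scalar C cross-check of the arithmetic, assuming the 128-sample intermediate stride implied by the 128*2-byte increments above:

#include <stddef.h>
#include <stdint.h>

static inline uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

/* Scalar equivalent of the 8-bit avg path. */
static void vvc_avg8_c(uint8_t *dst, ptrdiff_t dst_stride,
                       const int16_t *src0, const int16_t *src1,
                       int width, int height)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = clip_u8((src0[x] + src1[x] + 64) >> 7);
        dst  += dst_stride;
        src0 += 128;   /* intermediate buffers use a fixed 128-sample stride */
        src1 += 128;
    }
}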
|
||||
|
||||
|
||||
.macro AVG_JMP_TABLE id, vlen
|
||||
const jmp_table_\id\vlen
|
||||
.4byte \id\()2\vlen\()f - jmp_table_\id\vlen
|
||||
.4byte \id\()4\vlen\()f - jmp_table_\id\vlen
|
||||
.4byte \id\()8\vlen\()f - jmp_table_\id\vlen
|
||||
.4byte \id\()16\vlen\()f - jmp_table_\id\vlen
|
||||
.4byte \id\()32\vlen\()f - jmp_table_\id\vlen
|
||||
.4byte \id\()64\vlen\()f - jmp_table_\id\vlen
|
||||
.4byte \id\()128\vlen\()f - jmp_table_\id\vlen
|
||||
endconst
|
||||
.endm
|
||||
|
||||
.macro AVG_J vlen, id
|
||||
clz t1, a4
|
||||
neg t1, t1
|
||||
lla t5, jmp_table_\id\vlen
|
||||
sh2add t1, t1, t5
|
||||
lw t1, ((__riscv_xlen-2)<<2)(t1)
|
||||
add t1, t1, t5
|
||||
jr t1
|
||||
.endm
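AVG_J maps the width (a power of two between 2 and 128 here) to a jump-table index with clz: the entry read is number XLEN - 2 - clz(width), i.e. log2(width) - 1, so width 2 selects entry 0 and width 128 entry 6. The same computation in C, purely for clarity (GCC/Clang builtin):

/* Maps width 2, 4, 8, ..., 128 to table index 0..6, mirroring the clz trick. */
static inline int avg_table_index(unsigned long width)
{
    return (8 * (int)sizeof(width) - 1) - __builtin_clzl(width) - 1;
}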
|
||||
|
||||
.macro func_avg vlen
|
||||
func ff_vvc_avg_8_rvv_\vlen\(), zve32x, zbb, zba
|
||||
lpad 0
|
||||
AVG_JMP_TABLE 1, \vlen
|
||||
csrwi vxrm, 0
|
||||
AVG_J \vlen, 1
|
||||
.irp w,2,4,8,16,32,64,128
|
||||
avg \w, \vlen, 1
|
||||
.endr
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
func_avg 128
|
||||
func_avg 256
|
||||
|
||||
#if (__riscv_xlen == 64)
|
||||
.macro w_avg w, vlen, id
|
||||
\id\w\vlen:
|
||||
.if \w <= 32 || (\w == 64 && \vlen == 256)
|
||||
vsetvlstatic16 \w, \vlen
|
||||
addi t0, a2, 128*2
|
||||
addi t1, a3, 128*2
|
||||
vle16.v v0, (a2)
|
||||
vle16.v v4, (a3)
|
||||
addi a5, a5, -2
|
||||
vle16.v v8, (t0)
|
||||
vle16.v v12, (t1)
|
||||
vwmul.vx v16, v0, a7
|
||||
vwmul.vx v24, v8, a7
|
||||
vwmacc.vx v16, t3, v4
|
||||
vwmacc.vx v24, t3, v12
|
||||
vsetvlstatic32 \w, \vlen
|
||||
add t2, a0, a1
|
||||
vadd.vx v16, v16, t4
|
||||
vadd.vx v24, v24, t4
|
||||
vsetvlstatic16 \w, \vlen
|
||||
vnsrl.wx v16, v16, t6
|
||||
vnsrl.wx v24, v24, t6
|
||||
vmax.vx v16, v16, zero
|
||||
vmax.vx v24, v24, zero
|
||||
vsetvlstatic8 \w, \vlen
|
||||
addi a2, a2, 128*4
|
||||
vnclipu.wi v16, v16, 0
|
||||
vnclipu.wi v24, v24, 0
|
||||
vse8.v v16, (a0)
|
||||
addi a3, a3, 128*4
|
||||
vse8.v v24, (t2)
|
||||
sh1add a0, a1, a0
|
||||
.else
|
||||
addi a5, a5, -1
|
||||
mv t1, a0
|
||||
mv t2, a2
|
||||
mv t5, a3
|
||||
mv a6, a4
|
||||
1:
|
||||
vsetvli t0, a4, e16, m4, ta, ma
|
||||
sub a4, a4, t0
|
||||
vle16.v v0, (a2)
|
||||
vle16.v v4, (a3)
|
||||
vwmul.vx v16, v0, a7
|
||||
vwmacc.vx v16, t3, v4
|
||||
vsetvli zero, zero, e32, m8, ta, ma
|
||||
vadd.vx v16, v16, t4
|
||||
vsetvli zero, zero, e16, m4, ta, ma
|
||||
vnsrl.wx v16, v16, t6
|
||||
vmax.vx v16, v16, zero
|
||||
vsetvli zero, zero, e8, m2, ta, ma
|
||||
vnclipu.wi v16, v16, 0
|
||||
vse8.v v16, (a0)
|
||||
sh1add a2, t0, a2
|
||||
sh1add a3, t0, a3
|
||||
add a0, a0, t0
|
||||
bnez a4, 1b
|
||||
add a0, t1, a1
|
||||
addi a2, t2, 128*2
|
||||
addi a3, t5, 128*2
|
||||
mv a4, a6
|
||||
.endif
|
||||
bnez a5, \id\w\vlen\()b
|
||||
ret
|
||||
.endm
|
||||
|
||||
.macro func_w_avg vlen
|
||||
func ff_vvc_w_avg_8_rvv_\vlen\(), zve32x, zbb, zba
|
||||
lpad 0
|
||||
AVG_JMP_TABLE 2, \vlen
|
||||
csrwi vxrm, 0
|
||||
addi t6, a6, 7
|
||||
ld t3, (sp)
|
||||
ld t4, 8(sp)
|
||||
ld t5, 16(sp)
|
||||
addi t4, t4, 1 // o0 + o1 + 1
|
||||
add t4, t4, t5
|
||||
addi t5, t6, -1 // shift - 1
|
||||
sll t4, t4, t5
|
||||
AVG_J \vlen, 2
|
||||
.irp w,2,4,8,16,32,64,128
|
||||
w_avg \w, \vlen, 2
|
||||
.endr
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
func_w_avg 128
|
||||
func_w_avg 256
|
||||
#endif
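func_w_avg pre-loads the constants of the weighted bi-prediction formula: a7 and t3 hold the two weights, t6 the shift (a6 + 7 for 8-bit content) and t4 the combined rounding-and-offset term (o0 + o1 + 1) << (shift - 1). A per-sample scalar sketch, with parameter names taken from the C prototype declared in the init file below:

/* Per-sample weighted average as set up by func_w_avg (8-bit output). */
static inline int w_avg_sample(int s0, int s1, int w0, int w1,
                               int o0, int o1, int shift)
{
    int v = (s0 * w0 + s1 * w1 + ((o0 + o1 + 1) << (shift - 1))) >> shift;
    return v < 0 ? 0 : v > 255 ? 255 : v;   /* final clip to 8 bits */
}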
|
@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Copyright (c) 2024 Institute of Software Chinese Academy of Sciences (ISCAS).
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/cpu.h"
|
||||
#include "libavutil/riscv/cpu.h"
|
||||
#include "libavcodec/vvc/dsp.h"
|
||||
|
||||
#define bf(fn, bd, opt) fn##_##bd##_##opt
|
||||
|
||||
#define AVG_PROTOTYPES(bd, opt) \
|
||||
void bf(ff_vvc_avg, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride, \
|
||||
const int16_t *src0, const int16_t *src1, int width, int height); \
|
||||
void bf(ff_vvc_w_avg, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride, \
|
||||
const int16_t *src0, const int16_t *src1, int width, int height, \
|
||||
int denom, int w0, int w1, int o0, int o1);
|
||||
|
||||
AVG_PROTOTYPES(8, rvv_128)
|
||||
AVG_PROTOTYPES(8, rvv_256)
|
||||
|
||||
void ff_vvc_dsp_init_riscv(VVCDSPContext *const c, const int bd)
|
||||
{
|
||||
#if HAVE_RVV
|
||||
const int flags = av_get_cpu_flags();
|
||||
int vlenb = ff_get_rv_vlenb();
|
||||
|
||||
if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB) &&
|
||||
vlenb >= 32) {
|
||||
switch (bd) {
|
||||
case 8:
|
||||
c->inter.avg = ff_vvc_avg_8_rvv_256;
|
||||
# if (__riscv_xlen == 64)
|
||||
c->inter.w_avg = ff_vvc_w_avg_8_rvv_256;
|
||||
# endif
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} else if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB) &&
|
||||
vlenb >= 16) {
|
||||
switch (bd) {
|
||||
case 8:
|
||||
c->inter.avg = ff_vvc_avg_8_rvv_128;
|
||||
# if (__riscv_xlen == 64)
|
||||
c->inter.w_avg = ff_vvc_w_avg_8_rvv_128;
|
||||
# endif
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
21
submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavcodec/tests/.gitignore
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
/av1_levels
|
||||
/avcodec
|
||||
/avpacket
|
||||
/bitstream_be
|
||||
/bitstream_le
|
||||
/cabac
|
||||
/celp_math
|
||||
/codec_desc
|
||||
/dct
|
||||
/golomb
|
||||
/h264_levels
|
||||
/h265_levels
|
||||
/htmlsubtitles
|
||||
/iirfilter
|
||||
/jpeg2000dwt
|
||||
/mathops
|
||||
/mjpegenc_huffman
|
||||
/motion
|
||||
/mpeg12framerate
|
||||
/rangecoder
|
||||
/snowenc
|
@ -29,7 +29,7 @@
|
||||
|
||||
#include "version_major.h"
|
||||
|
||||
#define LIBAVCODEC_VERSION_MINOR 19
|
||||
#define LIBAVCODEC_VERSION_MINOR 21
|
||||
#define LIBAVCODEC_VERSION_MICRO 100
|
||||
|
||||
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
|
||||
|
@ -56,6 +56,10 @@ enum { kCMVideoCodecType_HEVC = 'hvc1' };
|
||||
enum { kCMVideoCodecType_VP9 = 'vp09' };
|
||||
#endif
|
||||
|
||||
#if !HAVE_KCMVIDEOCODECTYPE_AV1
|
||||
enum { kCMVideoCodecType_AV1 = 'av01' };
|
||||
#endif
|
||||
|
||||
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
|
||||
|
||||
typedef struct VTHWFrame {
|
||||
@ -92,6 +96,26 @@ int ff_videotoolbox_buffer_copy(VTContext *vtctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ff_videotoolbox_buffer_append(VTContext *vtctx,
|
||||
const uint8_t *buffer,
|
||||
uint32_t size)
|
||||
{
|
||||
void *tmp;
|
||||
|
||||
tmp = av_fast_realloc(vtctx->bitstream,
|
||||
&vtctx->allocated_size,
|
||||
vtctx->bitstream_size + size);
|
||||
|
||||
if (!tmp)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
vtctx->bitstream = tmp;
|
||||
memcpy(vtctx->bitstream + vtctx->bitstream_size, buffer, size);
|
||||
vtctx->bitstream_size += size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
|
||||
{
|
||||
int ret;
|
||||
@ -846,6 +870,13 @@ static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec
|
||||
if (data)
|
||||
CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
|
||||
break;
|
||||
#endif
|
||||
#if CONFIG_AV1_VIDEOTOOLBOX_HWACCEL
|
||||
case kCMVideoCodecType_AV1 :
|
||||
data = ff_videotoolbox_av1c_extradata_create(avctx);
|
||||
if (data)
|
||||
CFDictionarySetValue(avc_info, CFSTR("av1C"), data);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
break;
|
||||
@ -912,6 +943,9 @@ static int videotoolbox_start(AVCodecContext *avctx)
|
||||
case AV_CODEC_ID_VP9 :
|
||||
videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
|
||||
break;
|
||||
case AV_CODEC_ID_AV1 :
|
||||
videotoolbox->cm_codec_type = kCMVideoCodecType_AV1;
|
||||
break;
|
||||
default :
|
||||
break;
|
||||
}
|
||||
|
@ -0,0 +1,105 @@
/*
 * Videotoolbox hardware acceleration for AV1
 * Copyright (c) 2023 Jan Ekström
 * Copyright (c) 2024 Ruslan Chernenko
 * Copyright (c) 2024 Martin Storsjö
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"

#include "av1dec.h"
#include "hwaccel_internal.h"
#include "internal.h"
#include "vt_internal.h"

CFDataRef ff_videotoolbox_av1c_extradata_create(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    uint8_t *buf;
    CFDataRef data;
    if (!s->raw_seq)
        return NULL;

    buf = av_malloc(s->seq_data_ref->size + 4);
    if (!buf)
        return NULL;
    buf[0] = 0x81; // version and marker (constant)
    buf[1] = s->raw_seq->seq_profile << 5 | s->raw_seq->seq_level_idx[0];
    buf[2] = s->raw_seq->seq_tier[0]                      << 7 |
             s->raw_seq->color_config.high_bitdepth       << 6 |
             s->raw_seq->color_config.twelve_bit          << 5 |
             s->raw_seq->color_config.mono_chrome         << 4 |
             s->raw_seq->color_config.subsampling_x       << 3 |
             s->raw_seq->color_config.subsampling_y       << 2 |
             s->raw_seq->color_config.chroma_sample_position;

    if (s->raw_seq->initial_display_delay_present_flag)
        buf[3] = 0 << 5 |
                 s->raw_seq->initial_display_delay_present_flag << 4 |
                 s->raw_seq->initial_display_delay_minus_1[0];
    else
        buf[3] = 0x00;
    memcpy(buf + 4, s->seq_data_ref->data, s->seq_data_ref->size);
    data = CFDataCreate(kCFAllocatorDefault, buf, s->seq_data_ref->size + 4);
    av_free(buf);
    return data;
};


static int videotoolbox_av1_start_frame(AVCodecContext *avctx,
                                        const uint8_t *buffer,
                                        uint32_t size)
{
    return 0;
}

static int videotoolbox_av1_decode_slice(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    return 0;
}

static int videotoolbox_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *s = avctx->priv_data;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    AVFrame *frame = s->cur_frame.f;

    vtctx->bitstream_size = 0;
    for (int i = s->start_unit; i < s->nb_unit; i++)
        ff_videotoolbox_buffer_append(vtctx, s->current_obu.units[i].data,
                                      s->current_obu.units[i].data_size);
    return ff_videotoolbox_common_end_frame(avctx, frame);
}

const FFHWAccel ff_av1_videotoolbox_hwaccel = {
    .p.name         = "av1_videotoolbox",
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_AV1,
    .p.pix_fmt      = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_av1_start_frame,
    .decode_slice   = videotoolbox_av1_decode_slice,
    .end_frame      = videotoolbox_av1_end_frame,
    .frame_params   = ff_videotoolbox_frame_params,
    .init           = ff_videotoolbox_common_init,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
@ -56,6 +56,9 @@ int ff_videotoolbox_frame_params(AVCodecContext *avctx,
int ff_videotoolbox_buffer_copy(VTContext *vtctx,
                                const uint8_t *buffer,
                                uint32_t size);
int ff_videotoolbox_buffer_append(VTContext *vtctx,
                                  const uint8_t *buffer,
                                  uint32_t size);
int ff_videotoolbox_uninit(AVCodecContext *avctx);
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                     const uint8_t *buffer,
@ -64,6 +67,7 @@ int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
                                      const uint8_t *buffer,
                                      uint32_t size);
int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame);
CFDataRef ff_videotoolbox_av1c_extradata_create(AVCodecContext *avctx);
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx);
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx);
CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx);
@ -102,6 +102,8 @@ void ff_vvc_dsp_init(VVCDSPContext *vvcdsp, int bit_depth)

#if ARCH_AARCH64
    ff_vvc_dsp_init_aarch64(vvcdsp, bit_depth);
#elif ARCH_RISCV
    ff_vvc_dsp_init_riscv(vvcdsp, bit_depth);
#elif ARCH_X86
    ff_vvc_dsp_init_x86(vvcdsp, bit_depth);
#endif

@ -179,6 +179,7 @@ typedef struct VVCDSPContext {
void ff_vvc_dsp_init(VVCDSPContext *hpc, int bit_depth);

void ff_vvc_dsp_init_aarch64(VVCDSPContext *hpc, const int bit_depth);
void ff_vvc_dsp_init_riscv(VVCDSPContext *hpc, const int bit_depth);
void ff_vvc_dsp_init_x86(VVCDSPContext *hpc, const int bit_depth);

#endif /* AVCODEC_VVC_DSP_H */
@ -472,6 +472,9 @@ static void FUNC(apply_bdof)(uint8_t *_dst, const ptrdiff_t _dst_stride, const i
    (filter[0] * src[x] + \
     filter[1] * src[x + stride])

#define DMVR_FILTER2(filter, src0, src1) \
    (filter[0] * src0 + filter[1] * src1)

//8.5.3.2.2 Luma sample bilinear interpolation process
static void FUNC(dmvr)(int16_t *dst, const uint8_t *_src, const ptrdiff_t _src_stride,
                       const int height, const intptr_t mx, const intptr_t my, const int width)
@ -541,31 +544,31 @@ static void FUNC(dmvr_v)(int16_t *dst, const uint8_t *_src, const ptrdiff_t _src
static void FUNC(dmvr_hv)(int16_t *dst, const uint8_t *_src, const ptrdiff_t _src_stride,
                          const int height, const intptr_t mx, const intptr_t my, const int width)
{
    int16_t tmp_array[(MAX_PB_SIZE + BILINEAR_EXTRA) * MAX_PB_SIZE];
    int16_t *tmp = tmp_array;
    int16_t tmp_array[MAX_PB_SIZE * 2];
    int16_t *tmp0 = tmp_array;
    int16_t *tmp1 = tmp_array + MAX_PB_SIZE;
    const pixel *src = (const pixel*)_src;
    const ptrdiff_t src_stride = _src_stride / sizeof(pixel);
    const int8_t *filter = ff_vvc_inter_luma_dmvr_filters[mx];
    const int8_t *filter_x = ff_vvc_inter_luma_dmvr_filters[mx];
    const int8_t *filter_y = ff_vvc_inter_luma_dmvr_filters[my];
    const int shift1 = BIT_DEPTH - 6;
    const int offset1 = 1 << (shift1 - 1);
    const int shift2 = 4;
    const int offset2 = 1 << (shift2 - 1);

    src -= BILINEAR_EXTRA_BEFORE * src_stride;
    for (int y = 0; y < height + BILINEAR_EXTRA; y++) {
        for (int x = 0; x < width; x++)
            tmp[x] = (DMVR_FILTER(src, 1) + offset1) >> shift1;
        src += src_stride;
        tmp += MAX_PB_SIZE;
    }
    for (int x = 0; x < width; x++)
        tmp0[x] = (DMVR_FILTER2(filter_x, src[x], src[x + 1]) + offset1) >> shift1;
    src += src_stride;

    tmp = tmp_array + BILINEAR_EXTRA_BEFORE * MAX_PB_SIZE;
    filter = ff_vvc_inter_luma_dmvr_filters[my];
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = (DMVR_FILTER(tmp, MAX_PB_SIZE) + offset2) >> shift2;
        tmp += MAX_PB_SIZE;
    for (int y = 1; y < height + BILINEAR_EXTRA; y++) {
        for (int x = 0; x < width; x++) {
            tmp1[x] = (DMVR_FILTER2(filter_x, src[x], src[x + 1]) + offset1) >> shift1;
            dst[x] = (DMVR_FILTER2(filter_y, tmp0[x], tmp1[x]) + offset2) >> shift2;
        }
        src += src_stride;
        dst += MAX_PB_SIZE;
        FFSWAP(int16_t *, tmp0, tmp1);
    }
}

submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavdevice/.gitignore (new vendored file, 2 lines)
@ -0,0 +1,2 @@
/indev_list.c
/outdev_list.c
@ -632,7 +632,6 @@ static int get_video_config(AVFormatContext *s)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    CVImageBufferRef image_buffer;
    CMBlockBufferRef block_buffer;
    CGSize image_buffer_size;
    AVStream* stream = avformat_new_stream(s, NULL);

@ -652,7 +651,6 @@ static int get_video_config(AVFormatContext *s)
    avpriv_set_pts_info(stream, 64, 1, avf_time_base);

    image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
    block_buffer = CMSampleBufferGetDataBuffer(ctx->current_frame);

    if (image_buffer) {
        image_buffer_size = CVImageBufferGetEncodedSize(image_buffer);

@ -249,18 +249,24 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL,
    };

    ret = avfilter_graph_create_filter(&sink, abuffersink,
                                       inout->name, NULL,
                                       NULL, lavfi->graph);
    if (ret >= 0)
        ret = av_opt_set_bin(sink, "sample_fmts", (const uint8_t*)sample_fmts,
                             sizeof(sample_fmts), AV_OPT_SEARCH_CHILDREN);
    sink = avfilter_graph_alloc_filter(lavfi->graph, abuffersink, inout->name);
    if (!sink) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ret = av_opt_set_bin(sink, "sample_fmts", (const uint8_t*)sample_fmts,
                         sizeof(sample_fmts), AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        goto end;
    ret = av_opt_set_int(sink, "all_channel_counts", 1,
                         AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        goto end;

    ret = avfilter_init_dict(sink, NULL);
    if (ret < 0)
        goto end;
} else {
    av_log(avctx, AV_LOG_ERROR,
           "Output '%s' is not a video or audio output, not yet supported\n", inout->name);

submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavdevice/tests/.gitignore (new vendored file, 1 line)
@ -0,0 +1 @@
/timefilter
@ -29,7 +29,7 @@

#include "version_major.h"

#define LIBAVDEVICE_VERSION_MINOR 3
#define LIBAVDEVICE_VERSION_MINOR 4
#define LIBAVDEVICE_VERSION_MICRO 100

#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavfilter/.gitignore (new vendored file, 1 line)
@ -0,0 +1 @@
/filter_list.c
@ -80,22 +80,24 @@ static int activate(AVFilterContext *ctx)
    return ff_filter_frame(outlink, frame);
}

static int query_formats(AVFilterContext *ctx)
static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    AFDelaySrcContext *s = ctx->priv;
    const AFDelaySrcContext *s = ctx->priv;
    AVChannelLayout chlayouts[] = { s->chlayout, { 0 } };
    int sample_rates[] = { s->sample_rate, -1 };
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP,
                                                       AV_SAMPLE_FMT_NONE };
    int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
    int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_channel_layouts_from_list(ctx, chlayouts);
    ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
    if (ret < 0)
        return ret;

    return ff_set_common_samplerates_from_list(ctx, sample_rates);
    return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
}

static int config_output(AVFilterLink *outlink)
@ -146,5 +148,5 @@ const AVFilter ff_asrc_afdelaysrc = {
    .activate = activate,
    .inputs = NULL,
    FILTER_OUTPUTS(afdelaysrc_outputs),
    FILTER_QUERY_FUNC(query_formats),
    FILTER_QUERY_FUNC2(query_formats),
};
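
Taken together, the replacement lines above give the callback shape that FILTER_QUERY_FUNC2 expects, and the same migration repeats in the filter hunks below. A consolidated sketch, assembled only from what this hunk shows and included here purely for orientation (it uses no APIs beyond those in the patch; the format list is the one this filter already declares):

    static int query_formats(const AVFilterContext *ctx,
                             AVFilterFormatsConfig **cfg_in,
                             AVFilterFormatsConfig **cfg_out)
    {
        static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP,
                                                           AV_SAMPLE_FMT_NONE };
        /* The *_from_list2() helpers take the cfg_in/cfg_out arrays explicitly
         * instead of reaching into the (now const) filter context; channel
         * layouts and sample rates follow the same pattern. */
        int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
        if (ret < 0)
            return ret;
        return 0;
    }

The callback is then registered with FILTER_QUERY_FUNC2(query_formats) in the AVFilter definition, as the hunk above does.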
@ -113,24 +113,26 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_tx_uninit(&s->itx_ctx);
|
||||
}
|
||||
|
||||
static av_cold int query_formats(AVFilterContext *ctx)
|
||||
static av_cold int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AudioFIRSourceContext *s = ctx->priv;
|
||||
const AudioFIRSourceContext *s = ctx->priv;
|
||||
static const AVChannelLayout chlayouts[] = { AV_CHANNEL_LAYOUT_MONO, { 0 } };
|
||||
int sample_rates[] = { s->sample_rate, -1 };
|
||||
static const enum AVSampleFormat sample_fmts[] = {
|
||||
AV_SAMPLE_FMT_FLT,
|
||||
AV_SAMPLE_FMT_NONE
|
||||
};
|
||||
int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
|
||||
int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ff_set_common_channel_layouts_from_list(ctx, chlayouts);
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return ff_set_common_samplerates_from_list(ctx, sample_rates);
|
||||
return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
}
|
||||
|
||||
static int parse_string(char *str, float **items, int *nb_items, int *items_size)
|
||||
@ -304,7 +306,7 @@ const AVFilter ff_asrc_afirsrc = {
|
||||
.priv_size = sizeof(AudioFIRSourceContext),
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(afirsrc_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &afirsrc_class,
|
||||
};
|
||||
|
||||
@ -585,6 +587,6 @@ const AVFilter ff_asrc_afireqsrc = {
|
||||
.priv_size = sizeof(AudioFIRSourceContext),
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(afireqsrc_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &afireqsrc_class,
|
||||
};
|
||||
|
@ -83,24 +83,26 @@ static const AVOption anoisesrc_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(anoisesrc);
|
||||
|
||||
static av_cold int query_formats(AVFilterContext *ctx)
|
||||
static av_cold int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
ANoiseSrcContext *s = ctx->priv;
|
||||
const ANoiseSrcContext *s = ctx->priv;
|
||||
static const AVChannelLayout chlayouts[] = { AV_CHANNEL_LAYOUT_MONO, { 0 } };
|
||||
int sample_rates[] = { s->sample_rate, -1 };
|
||||
static const enum AVSampleFormat sample_fmts[] = {
|
||||
AV_SAMPLE_FMT_DBL,
|
||||
AV_SAMPLE_FMT_NONE
|
||||
};
|
||||
int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
|
||||
int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ff_set_common_channel_layouts_from_list(ctx, chlayouts);
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return ff_set_common_samplerates_from_list(ctx, sample_rates);
|
||||
return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
}
|
||||
|
||||
static double white_filter(double white, double *buf)
|
||||
@ -244,6 +246,6 @@ const AVFilter ff_asrc_anoisesrc = {
|
||||
.inputs = NULL,
|
||||
.activate = activate,
|
||||
FILTER_OUTPUTS(anoisesrc_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &anoisesrc_class,
|
||||
};
|
||||
|
@ -61,18 +61,20 @@ static const AVOption anullsrc_options[]= {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(anullsrc);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
ANullContext *null = ctx->priv;
|
||||
const ANullContext *null = ctx->priv;
|
||||
const AVChannelLayout chlayouts[] = { null->ch_layout, { 0 } };
|
||||
int sample_rates[] = { null->sample_rate, -1 };
|
||||
int ret;
|
||||
|
||||
if ((ret = ff_set_common_formats (ctx, ff_all_formats (AVMEDIA_TYPE_AUDIO))) < 0 ||
|
||||
(ret = ff_set_common_samplerates_from_list(ctx, sample_rates)) < 0)
|
||||
ret = ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return ff_set_common_channel_layouts_from_list(ctx, chlayouts);
|
||||
return ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
|
||||
}
|
||||
|
||||
static av_cold int config_props(AVFilterLink *outlink)
|
||||
@ -124,7 +126,7 @@ const AVFilter ff_asrc_anullsrc = {
|
||||
.priv_size = sizeof(ANullContext),
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(avfilter_asrc_anullsrc_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.activate = activate,
|
||||
.priv_class = &anullsrc_class,
|
||||
};
|
||||
|
@ -255,24 +255,35 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_audio_fifo_free(flite->fifo);
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
FliteContext *flite = ctx->priv;
|
||||
const FliteContext *flite = ctx->priv;
|
||||
|
||||
static const enum AVSampleFormat formats[] = {
|
||||
AV_SAMPLE_FMT_S16,
|
||||
AV_SAMPLE_FMT_NONE,
|
||||
};
|
||||
int sample_rates[] = { flite->sample_rate, -1 };
|
||||
AVChannelLayout layouts[2] = {
|
||||
{ .nb_channels = 0 },
|
||||
};
|
||||
|
||||
int ret;
|
||||
|
||||
AVFilterChannelLayouts *chlayouts = NULL;
|
||||
AVFilterFormats *sample_formats = NULL;
|
||||
AVFilterFormats *sample_rates = NULL;
|
||||
AVChannelLayout chlayout = { 0 };
|
||||
av_channel_layout_default(&layouts[0], flite->nb_channels);
|
||||
|
||||
av_channel_layout_default(&chlayout, flite->nb_channels);
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, layouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = ff_add_channel_layout (&chlayouts , &chlayout )) < 0 ||
|
||||
(ret = ff_set_common_channel_layouts (ctx , chlayouts )) < 0 ||
|
||||
(ret = ff_add_format (&sample_formats, AV_SAMPLE_FMT_S16 )) < 0 ||
|
||||
(ret = ff_set_common_formats (ctx , sample_formats )) < 0 ||
|
||||
(ret = ff_add_format (&sample_rates , flite->sample_rate )) < 0 ||
|
||||
(ret = ff_set_common_samplerates (ctx , sample_rates )) < 0)
|
||||
ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, formats);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -348,6 +359,6 @@ const AVFilter ff_asrc_flite = {
|
||||
.activate = activate,
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(flite_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &flite_class,
|
||||
};
|
||||
|
@ -75,7 +75,9 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_freep(&s->taps);
|
||||
}
|
||||
|
||||
static av_cold int query_formats(AVFilterContext *ctx)
|
||||
static av_cold int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
HilbertContext *s = ctx->priv;
|
||||
static const AVChannelLayout chlayouts[] = { AV_CHANNEL_LAYOUT_MONO, { 0 } };
|
||||
@ -84,15 +86,15 @@ static av_cold int query_formats(AVFilterContext *ctx)
|
||||
AV_SAMPLE_FMT_FLT,
|
||||
AV_SAMPLE_FMT_NONE
|
||||
};
|
||||
int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
|
||||
int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ff_set_common_channel_layouts_from_list(ctx, chlayouts);
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return ff_set_common_samplerates_from_list(ctx, sample_rates);
|
||||
return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
}
|
||||
|
||||
static av_cold int config_props(AVFilterLink *outlink)
|
||||
@ -168,6 +170,6 @@ const AVFilter ff_asrc_hilbert = {
|
||||
.priv_size = sizeof(HilbertContext),
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(hilbert_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &hilbert_class,
|
||||
};
|
||||
|
@ -74,22 +74,24 @@ static int activate(AVFilterContext *ctx)
|
||||
return ff_filter_frame(outlink, frame);
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
SincContext *s = ctx->priv;
|
||||
const SincContext *s = ctx->priv;
|
||||
static const AVChannelLayout chlayouts[] = { AV_CHANNEL_LAYOUT_MONO, { 0 } };
|
||||
int sample_rates[] = { s->sample_rate, -1 };
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT,
|
||||
AV_SAMPLE_FMT_NONE };
|
||||
int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
|
||||
int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ff_set_common_channel_layouts_from_list(ctx, chlayouts);
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return ff_set_common_samplerates_from_list(ctx, sample_rates);
|
||||
return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
}
|
||||
|
||||
static float *make_lpf(int num_taps, float Fc, float beta, float rho,
|
||||
@ -428,5 +430,5 @@ const AVFilter ff_asrc_sinc = {
|
||||
.activate = activate,
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(sinc_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
};
|
||||
|
@ -178,22 +178,24 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_freep(&sine->sin);
|
||||
}
|
||||
|
||||
static av_cold int query_formats(AVFilterContext *ctx)
|
||||
static av_cold int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
SineContext *sine = ctx->priv;
|
||||
const SineContext *sine = ctx->priv;
|
||||
static const AVChannelLayout chlayouts[] = { AV_CHANNEL_LAYOUT_MONO, { 0 } };
|
||||
int sample_rates[] = { sine->sample_rate, -1 };
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16,
|
||||
AV_SAMPLE_FMT_NONE };
|
||||
int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
|
||||
int ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ff_set_common_channel_layouts_from_list(ctx, chlayouts);
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return ff_set_common_samplerates_from_list(ctx, sample_rates);
|
||||
return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
|
||||
}
|
||||
|
||||
static av_cold int config_props(AVFilterLink *outlink)
|
||||
@ -271,6 +273,6 @@ const AVFilter ff_asrc_sine = {
|
||||
.priv_size = sizeof(SineContext),
|
||||
.inputs = NULL,
|
||||
FILTER_OUTPUTS(sine_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &sine_class,
|
||||
};
|
||||
|
@ -73,30 +73,21 @@ static const AVOption a3dscope_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(a3dscope);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_counts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -352,7 +343,7 @@ const AVFilter ff_avf_a3dscope = {
|
||||
.activate = activate,
|
||||
FILTER_INPUTS(audio3dscope_inputs),
|
||||
FILTER_OUTPUTS(audio3dscope_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &a3dscope_class,
|
||||
.process_command = ff_filter_process_command,
|
||||
};
|
||||
|
@ -65,12 +65,11 @@ static const AVOption abitscope_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(abitscope);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
|
||||
AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S64P,
|
||||
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
|
||||
@ -79,21 +78,11 @@ static int query_formats(AVFilterContext *ctx)
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_counts();
|
||||
if (!layouts)
|
||||
return AVERROR(ENOMEM);
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -322,7 +311,7 @@ const AVFilter ff_avf_abitscope = {
|
||||
.priv_size = sizeof(AudioBitScopeContext),
|
||||
FILTER_INPUTS(inputs),
|
||||
FILTER_OUTPUTS(outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.uninit = uninit,
|
||||
.activate = activate,
|
||||
.priv_class = &abitscope_class,
|
||||
|
@ -92,28 +92,21 @@ static const AVOption ahistogram_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(ahistogram);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE };
|
||||
int ret = AVERROR(EINVAL);
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0 ||
|
||||
(layouts = ff_all_channel_counts()) == NULL ||
|
||||
(ret = ff_channel_layouts_ref (layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -509,6 +502,6 @@ const AVFilter ff_avf_ahistogram = {
|
||||
.activate = activate,
|
||||
FILTER_INPUTS(ahistogram_inputs),
|
||||
FILTER_OUTPUTS(ahistogram_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &ahistogram_class,
|
||||
};
|
||||
|
@ -90,35 +90,32 @@ static const AVOption aphasemeter_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(aphasemeter);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AudioPhaseMeterContext *s = ctx->priv;
|
||||
const AudioPhaseMeterContext *s = ctx->priv;
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layout = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
|
||||
static const AVChannelLayout layouts[] = {
|
||||
AV_CHANNEL_LAYOUT_STEREO,
|
||||
{ .nb_channels = 0 },
|
||||
};
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0 ||
|
||||
(ret = ff_formats_ref (formats, &outlink->incfg.formats )) < 0 ||
|
||||
(ret = ff_add_channel_layout (&layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO )) < 0 ||
|
||||
(ret = ff_channel_layouts_ref (layout , &inlink->outcfg.channel_layouts)) < 0 ||
|
||||
(ret = ff_channel_layouts_ref (layout , &outlink->incfg.channel_layouts)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0 ||
|
||||
(ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
|
||||
(ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, layouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (s->do_video) {
|
||||
AVFilterLink *outlink = ctx->outputs[1];
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[1]->formats)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -440,7 +437,7 @@ const AVFilter ff_avf_aphasemeter = {
|
||||
FILTER_INPUTS(inputs),
|
||||
.activate = activate,
|
||||
.outputs = NULL,
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &aphasemeter_class,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
};
|
||||
|
@ -230,28 +230,29 @@ static int fade(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layout = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
|
||||
static const AVChannelLayout layouts[] = {
|
||||
AV_CHANNEL_LAYOUT_STEREO,
|
||||
{ .nb_channels = 0 },
|
||||
};
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0 ||
|
||||
(ret = ff_add_channel_layout (&layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO)) < 0 ||
|
||||
(ret = ff_channel_layouts_ref (layout , &inlink->outcfg.channel_layouts)) < 0)
|
||||
if ((ret = ff_formats_ref (formats, &cfg_in[0]->formats )) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, layouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -493,7 +494,7 @@ const AVFilter ff_avf_avectorscope = {
|
||||
.activate = activate,
|
||||
FILTER_INPUTS(audiovectorscope_inputs),
|
||||
FILTER_OUTPUTS(audiovectorscope_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &avectorscope_class,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
.process_command = ff_filter_process_command,
|
||||
|
@ -72,9 +72,11 @@ static const AVOption concat_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(concat);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
ConcatContext *cat = ctx->priv;
|
||||
const ConcatContext *cat = ctx->priv;
|
||||
unsigned type, nb_str, idx0 = 0, idx, str, seg;
|
||||
AVFilterFormats *formats, *rates = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
@ -87,25 +89,25 @@ static int query_formats(AVFilterContext *ctx)
|
||||
|
||||
/* Set the output formats */
|
||||
formats = ff_all_formats(type);
|
||||
if ((ret = ff_formats_ref(formats, &ctx->outputs[idx]->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[idx]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
if (type == AVMEDIA_TYPE_AUDIO) {
|
||||
rates = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(rates, &ctx->outputs[idx]->incfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(rates, &cfg_out[idx]->samplerates)) < 0)
|
||||
return ret;
|
||||
layouts = ff_all_channel_layouts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->incfg.channel_layouts)) < 0)
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &cfg_out[idx]->channel_layouts)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Set the same formats for each corresponding input */
|
||||
for (seg = 0; seg < cat->nb_segments; seg++) {
|
||||
if ((ret = ff_formats_ref(formats, &ctx->inputs[idx]->outcfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[idx]->formats)) < 0)
|
||||
return ret;
|
||||
if (type == AVMEDIA_TYPE_AUDIO) {
|
||||
if ((ret = ff_formats_ref(rates, &ctx->inputs[idx]->outcfg.samplerates)) < 0 ||
|
||||
(ret = ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->outcfg.channel_layouts)) < 0)
|
||||
if ((ret = ff_formats_ref(rates, &cfg_in[idx]->samplerates)) < 0 ||
|
||||
(ret = ff_channel_layouts_ref(layouts, &cfg_in[idx]->channel_layouts)) < 0)
|
||||
return ret;
|
||||
}
|
||||
idx += ctx->nb_outputs;
|
||||
@ -460,6 +462,6 @@ const AVFilter ff_avf_concat = {
|
||||
.outputs = NULL,
|
||||
.priv_class = &concat_class,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.process_command = process_command,
|
||||
};
|
||||
|
@ -1314,12 +1314,11 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
common_uninit(ctx->priv);
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = {
|
||||
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
|
||||
@ -1331,20 +1330,16 @@ static int query_formats(AVFilterContext *ctx)
|
||||
|
||||
/* set input audio formats */
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_make_channel_layout_list(channel_layouts);
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, channel_layouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* set output video format */
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -1612,6 +1607,6 @@ const AVFilter ff_avf_showcqt = {
|
||||
.priv_size = sizeof(ShowCQTContext),
|
||||
FILTER_INPUTS(ff_audio_default_filterpad),
|
||||
FILTER_OUTPUTS(showcqt_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &showcqt_class,
|
||||
};
|
||||
|
@ -222,30 +222,21 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_freep(&s->fdsp);
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE };
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_counts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -1334,7 +1325,7 @@ const AVFilter ff_avf_showcwt = {
|
||||
.priv_size = sizeof(ShowCWTContext),
|
||||
FILTER_INPUTS(ff_audio_default_filterpad),
|
||||
FILTER_OUTPUTS(showcwt_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.activate = activate,
|
||||
.priv_class = &showcwt_class,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
|
@ -116,32 +116,23 @@ static const AVOption showfreqs_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(showfreqs);
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
|
||||
int ret;
|
||||
|
||||
/* set input audio formats */
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_counts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
/* set output video format */
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -566,6 +557,6 @@ const AVFilter ff_avf_showfreqs = {
|
||||
.activate = activate,
|
||||
FILTER_INPUTS(ff_audio_default_filterpad),
|
||||
FILTER_OUTPUTS(showfreqs_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &showfreqs_class,
|
||||
};
|
||||
|
@ -80,28 +80,26 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_audio_fifo_free(s->fifo);
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layout = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GBRP, AV_PIX_FMT_NONE };
|
||||
static const AVChannelLayout layouts[] = { AV_CHANNEL_LAYOUT_STEREO, { .nb_channels = 0 } };
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0 ||
|
||||
(ret = ff_add_channel_layout (&layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO)) < 0 ||
|
||||
(ret = ff_channel_layouts_ref (layout , &inlink->outcfg.channel_layouts)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, layouts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -330,7 +328,7 @@ const AVFilter ff_avf_showspatial = {
|
||||
.priv_size = sizeof(ShowSpatialContext),
|
||||
FILTER_INPUTS(ff_audio_default_filterpad),
|
||||
FILTER_OUTPUTS(showspatial_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.activate = spatial_activate,
|
||||
.priv_class = &showspatial_class,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
|
@ -358,32 +358,23 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
av_freep(&s->frames);
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE };
|
||||
int ret;
|
||||
|
||||
/* set input audio formats */
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_counts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
/* set output video format */
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -1696,7 +1687,7 @@ const AVFilter ff_avf_showspectrum = {
|
||||
.priv_size = sizeof(ShowSpectrumContext),
|
||||
FILTER_INPUTS(ff_audio_default_filterpad),
|
||||
FILTER_OUTPUTS(showspectrum_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.activate = activate,
|
||||
.priv_class = &showspectrum_class,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
@ -1883,7 +1874,7 @@ const AVFilter ff_avf_showspectrumpic = {
|
||||
.priv_size = sizeof(ShowSpectrumContext),
|
||||
FILTER_INPUTS(showspectrumpic_inputs),
|
||||
FILTER_OUTPUTS(showspectrumpic_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &showspectrumpic_class,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
};
|
||||
|
@ -110,30 +110,21 @@ static av_cold int init(AVFilterContext *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
|
||||
int ret;
|
||||
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_counts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -520,6 +511,6 @@ const AVFilter ff_avf_showvolume = {
|
||||
.priv_size = sizeof(ShowVolumeContext),
|
||||
FILTER_INPUTS(showvolume_inputs),
|
||||
FILTER_OUTPUTS(showvolume_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &showvolume_class,
|
||||
};
|
||||
|
@ -155,32 +155,23 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
}
|
||||
}
|
||||
|
||||
static int query_formats(AVFilterContext *ctx)
|
||||
static int query_formats(const AVFilterContext *ctx,
|
||||
AVFilterFormatsConfig **cfg_in,
|
||||
AVFilterFormatsConfig **cfg_out)
|
||||
{
|
||||
AVFilterFormats *formats = NULL;
|
||||
AVFilterChannelLayouts *layouts = NULL;
|
||||
AVFilterLink *inlink = ctx->inputs[0];
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
int ret;
|
||||
|
||||
/* set input audio formats */
|
||||
formats = ff_make_format_list(sample_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
|
||||
return ret;
|
||||
|
||||
layouts = ff_all_channel_layouts();
|
||||
if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
|
||||
return ret;
|
||||
|
||||
formats = ff_all_samplerates();
|
||||
if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
/* set output video format */
|
||||
formats = ff_make_format_list(pix_fmts);
|
||||
if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
|
||||
if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@ -814,7 +805,7 @@ const AVFilter ff_avf_showwaves = {
|
||||
FILTER_INPUTS(ff_audio_default_filterpad),
|
||||
.activate = activate,
|
||||
FILTER_OUTPUTS(showwaves_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &showwaves_class,
|
||||
};
|
||||
|
||||
@ -925,7 +916,7 @@ const AVFilter ff_avf_showwavespic = {
|
||||
.priv_size = sizeof(ShowWavesContext),
|
||||
FILTER_INPUTS(showwavespic_inputs),
|
||||
FILTER_OUTPUTS(showwavespic_outputs),
|
||||
FILTER_QUERY_FUNC(query_formats),
|
||||
FILTER_QUERY_FUNC2(query_formats),
|
||||
.priv_class = &showwavespic_class,
|
||||
};
|
||||
|
||||
|
@ -159,7 +159,8 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
        src->outputs[srcpad] || dst->inputs[dstpad])
        return AVERROR(EINVAL);

    if (!fffilterctx(src)->initialized || !fffilterctx(dst)->initialized) {
    if (!(fffilterctx(src)->state_flags & AV_CLASS_STATE_INITIALIZED) ||
        !(fffilterctx(dst)->state_flags & AV_CLASS_STATE_INITIALIZED)) {
        av_log(src, AV_LOG_ERROR, "Filters must be initialized before linking.\n");
        return AVERROR(EINVAL);
    }
@ -676,6 +677,7 @@ static const AVClass avfilter_class = {
    .child_next          = filter_child_next,
    .child_class_iterate = filter_child_class_iterate,
    .option              = avfilter_options,
    .state_flags_offset  = offsetof(FFFilterContext, state_flags),
};

static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
@ -909,7 +911,7 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
    FFFilterContext *ctxi = fffilterctx(ctx);
    int ret = 0;

    if (ctxi->initialized) {
    if (ctxi->state_flags & AV_CLASS_STATE_INITIALIZED) {
        av_log(ctx, AV_LOG_ERROR, "Filter already initialized\n");
        return AVERROR(EINVAL);
    }
@ -940,7 +942,7 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
        return ret;
    }

    ctxi->initialized = 1;
    ctxi->state_flags |= AV_CLASS_STATE_INITIALIZED;

    return 0;
}
@ -845,9 +845,9 @@ AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);

/**
 * Create and add a filter instance into an existing graph.
 * The filter instance is created from the filter filt and inited
 * with the parameter args. opaque is currently ignored.
 * A convenience wrapper that allocates and initializes a filter in a single
 * step. The filter instance is created from the filter filt and inited with the
 * parameter args. opaque is currently ignored.
 *
 * In case of success put in *filt_ctx the pointer to the created
 * filter instance, otherwise set *filt_ctx to NULL.
@ -856,6 +856,12 @@ AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *nam
 * @param graph_ctx the filter graph
 * @return a negative AVERROR error code in case of failure, a non
 *         negative value otherwise
 *
 * @warning Since the filter is initialized after this function successfully
 *          returns, you MUST NOT set any further options on it. If you need to
 *          do that, call ::avfilter_graph_alloc_filter(), followed by setting
 *          the options, followed by ::avfilter_init_dict() instead of this
 *          function.
 */
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
                                 const char *name, const char *args, void *opaque,
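
The new warning above describes the same migration that the lavfi input device hunk earlier in this commit performs. A minimal sketch of that recommended alloc-then-configure-then-init sequence, shown here only for orientation; the filter choice, the option, the instance name and the wrapper function are illustrative assumptions rather than part of the patch, and the usual avfilter/opt headers are assumed to be included:

    /* Allocate the filter, set options while it is still uninitialized,
     * then initialize it, as the @warning above recommends. */
    static int add_abuffersink(AVFilterGraph *graph, AVFilterContext **out)
    {
        const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
        AVFilterContext *sink;
        int ret;

        if (!abuffersink)
            return AVERROR_FILTER_NOT_FOUND;

        sink = avfilter_graph_alloc_filter(graph, abuffersink, "out");
        if (!sink)
            return AVERROR(ENOMEM);

        /* Options may only be set before avfilter_init_dict(). */
        ret = av_opt_set_int(sink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN);
        if (ret < 0)
            return ret;

        ret = avfilter_init_dict(sink, NULL);
        if (ret < 0)
            return ret;

        *out = sink;
        return 0;
    }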
@ -100,9 +100,8 @@ typedef struct FFFilterContext {

    avfilter_execute_func *execute;

    // 1 when avfilter_init_*() was successfully called on this filter
    // 0 otherwise
    int initialized;
    // AV_CLASS_STATE_FLAG_*
    unsigned state_flags;
} FFFilterContext;

static inline FFFilterContext *fffilterctx(AVFilterContext *ctx)
@ -28,6 +28,7 @@
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#include "audio.h"
|
||||
@ -42,28 +43,49 @@
|
||||
typedef struct BufferSinkContext {
|
||||
const AVClass *class;
|
||||
unsigned warning_limit;
|
||||
unsigned frame_size;
|
||||
|
||||
/* only used for video */
|
||||
#if FF_API_BUFFERSINK_OPTS
|
||||
enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats
|
||||
int pixel_fmts_size;
|
||||
enum AVColorSpace *color_spaces; ///< list of accepted color spaces
|
||||
int color_spaces_size;
|
||||
enum AVColorRange *color_ranges; ///< list of accepted color ranges
|
||||
int color_ranges_size;
|
||||
#endif
|
||||
|
||||
enum AVPixelFormat *pixel_formats;
|
||||
unsigned nb_pixel_formats;
|
||||
|
||||
int *colorspaces;
|
||||
unsigned nb_colorspaces;
|
||||
|
||||
int *colorranges;
|
||||
unsigned nb_colorranges;
|
||||
|
||||
/* only used for audio */
|
||||
#if FF_API_BUFFERSINK_OPTS
|
||||
enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats
|
||||
int sample_fmts_size;
|
||||
char *channel_layouts_str; ///< list of accepted channel layouts
|
||||
int all_channel_counts;
|
||||
int *sample_rates; ///< list of accepted sample rates
|
||||
int sample_rates_size;
|
||||
#endif
|
||||
|
||||
enum AVSampleFormat *sample_formats;
|
||||
unsigned nb_sample_formats;
|
||||
|
||||
int *samplerates;
|
||||
unsigned nb_samplerates;
|
||||
|
||||
AVChannelLayout *channel_layouts;
|
||||
unsigned nb_channel_layouts;
|
||||
|
||||
AVFrame *peeked_frame;
|
||||
} BufferSinkContext;
|
||||
|
||||
#define NB_ITEMS(list) (list ## _size / sizeof(*list))
|
||||
|
||||
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
|
||||
{
|
||||
return av_buffersink_get_frame_flags(ctx, frame, 0);
|
||||
@ -137,6 +159,39 @@ static av_cold int common_init(AVFilterContext *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define TERMINATE_ARRAY(arr, val) \
|
||||
if (s->arr) { \
|
||||
void *tmp = av_realloc_array(s->arr, s->nb_ ## arr + 1, sizeof(*s->arr)); \
|
||||
if (!tmp) \
|
||||
return AVERROR(ENOMEM); \
|
||||
s->arr = tmp; \
|
||||
s->arr[s->nb_ ## arr] = val; \
|
||||
}
|
||||
|
||||
static int init_video(AVFilterContext *ctx)
|
||||
{
|
||||
BufferSinkContext *s = ctx->priv;
|
||||
|
||||
TERMINATE_ARRAY(pixel_formats, AV_PIX_FMT_NONE);
|
||||
TERMINATE_ARRAY(colorranges, -1);
|
||||
TERMINATE_ARRAY(colorspaces, -1);
|
||||
|
||||
return common_init(ctx);
|
||||
}
|
||||
|
||||
static int init_audio(AVFilterContext *ctx)
|
||||
{
|
||||
BufferSinkContext *s = ctx->priv;
|
||||
|
||||
TERMINATE_ARRAY(sample_formats, AV_SAMPLE_FMT_NONE);
|
||||
TERMINATE_ARRAY(samplerates, -1);
|
||||
TERMINATE_ARRAY(channel_layouts, (AVChannelLayout){ .nb_channels = 0 });
|
||||
|
||||
return common_init(ctx);
|
||||
}
|
||||
|
||||
#undef TERMINATE_ARRAY
|
||||
|
||||
static void uninit(AVFilterContext *ctx)
|
||||
{
|
||||
BufferSinkContext *buf = ctx->priv;
|
||||
@ -162,11 +217,25 @@ static int activate(AVFilterContext *ctx)
    return 0;
}

static int config_input_audio(AVFilterLink *inlink)
{
    BufferSinkContext *buf = inlink->dst->priv;
    FilterLink *l = ff_filter_link(inlink);

    l->min_samples = l->max_samples = buf->frame_size;

    return 0;
}

void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
{
    FilterLink *inlink = ff_filter_link(ctx->inputs[0]);
    BufferSinkContext *buf = ctx->priv;
    buf->frame_size = frame_size;

    inlink->min_samples = inlink->max_samples = frame_size;
    if (ctx->inputs && ctx->inputs[0]) {
        FilterLink *l = ff_filter_link(ctx->inputs[0]);
        l->min_samples = l->max_samples = buf->frame_size;
    }
}

#define MAKE_AVFILTERLINK_ACCESSOR(type, field) \
@ -220,6 +289,9 @@ int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out
    return 0;
}

#if FF_API_BUFFERSINK_OPTS
#define NB_ITEMS(list) (list ## _size / sizeof(*list))

#define CHECK_LIST_SIZE(field) \
    if (buf->field ## _size % sizeof(*buf->field)) { \
        av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
@ -227,12 +299,40 @@ int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out
               buf->field ## _size, (int)sizeof(*buf->field)); \
        return AVERROR(EINVAL); \
    }
#endif

static int vsink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    unsigned i;
    int ret;

#if FF_API_BUFFERSINK_OPTS
    if ((buf->pixel_fmts_size || buf->color_spaces_size || buf->color_ranges_size) &&
        (buf->nb_pixel_formats || buf->nb_colorspaces || buf->nb_colorranges)) {
        av_log(ctx, AV_LOG_ERROR, "Cannot combine old and new format lists\n");
        return AVERROR(EINVAL);
    }

    if (buf->nb_pixel_formats || buf->nb_colorspaces || buf->nb_colorranges) {
#endif
    if (buf->nb_pixel_formats) {
        ret = ff_set_common_formats_from_list(ctx, buf->pixel_formats);
        if (ret < 0)
            return ret;
    }
    if (buf->nb_colorspaces) {
        ret = ff_set_common_color_spaces_from_list(ctx, buf->colorspaces);
        if (ret < 0)
            return ret;
    }
    if (buf->nb_colorranges) {
        ret = ff_set_common_color_ranges_from_list(ctx, buf->colorranges);
        if (ret < 0)
            return ret;
    }
#if FF_API_BUFFERSINK_OPTS
    } else {
        unsigned i;
        CHECK_LIST_SIZE(pixel_fmts)
        CHECK_LIST_SIZE(color_spaces)
        CHECK_LIST_SIZE(color_ranges)
@ -262,6 +362,8 @@ static int vsink_query_formats(AVFilterContext *ctx)
        if ((ret = ff_set_common_color_ranges(ctx, formats)) < 0)
            return ret;
    }
    }
#endif

    return 0;
}
@ -269,12 +371,38 @@ static int vsink_query_formats(AVFilterContext *ctx)
static int asink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    int ret;

#if FF_API_BUFFERSINK_OPTS
    if ((buf->sample_fmts_size || buf->channel_layouts_str || buf->sample_rates_size) &&
        (buf->nb_sample_formats || buf->nb_samplerates || buf->nb_channel_layouts)) {
        av_log(ctx, AV_LOG_ERROR, "Cannot combine old and new format lists\n");
        return AVERROR(EINVAL);
    }

    if (buf->nb_sample_formats || buf->nb_samplerates || buf->nb_channel_layouts) {
#endif
    if (buf->nb_sample_formats) {
        ret = ff_set_common_formats_from_list(ctx, buf->sample_formats);
        if (ret < 0)
            return ret;
    }
    if (buf->nb_samplerates) {
        ret = ff_set_common_samplerates_from_list(ctx, buf->samplerates);
        if (ret < 0)
            return ret;
    }
    if (buf->nb_channel_layouts) {
        ret = ff_set_common_channel_layouts_from_list(ctx, buf->channel_layouts);
        if (ret < 0)
            return ret;
    }
#if FF_API_BUFFERSINK_OPTS
    } else {
        AVFilterFormats *formats = NULL;
        AVChannelLayout layout = { 0 };
        AVFilterChannelLayouts *layouts = NULL;
        unsigned i;
        int ret;

        CHECK_LIST_SIZE(sample_fmts)
        CHECK_LIST_SIZE(sample_rates)

@ -328,6 +456,8 @@ static int asink_query_formats(AVFilterContext *ctx)
        if ((ret = ff_set_common_samplerates(ctx, formats)) < 0)
            return ret;
    }
    }
#endif

    return 0;
}
@ -335,19 +465,38 @@ static int asink_query_formats(AVFilterContext *ctx)
#define OFFSET(x) offsetof(BufferSinkContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption buffersink_options[] = {
    { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
    { "color_spaces", "set the supported color spaces", OFFSET(color_spaces), AV_OPT_TYPE_BINARY, .flags = FLAGS },
    { "color_ranges", "set the supported color ranges", OFFSET(color_ranges), AV_OPT_TYPE_BINARY, .flags = FLAGS },
#if FF_API_BUFFERSINK_OPTS
    { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS | AV_OPT_FLAG_DEPRECATED },
    { "color_spaces", "set the supported color spaces", OFFSET(color_spaces), AV_OPT_TYPE_BINARY, .flags = FLAGS | AV_OPT_FLAG_DEPRECATED },
    { "color_ranges", "set the supported color ranges", OFFSET(color_ranges), AV_OPT_TYPE_BINARY, .flags = FLAGS | AV_OPT_FLAG_DEPRECATED },
#endif

    { "pixel_formats", "array of supported pixel formats", OFFSET(pixel_formats),
        AV_OPT_TYPE_PIXEL_FMT | AV_OPT_TYPE_FLAG_ARRAY, .max = INT_MAX, .flags = FLAGS },
    { "colorspaces", "array of supported color spaces", OFFSET(colorspaces),
        AV_OPT_TYPE_INT | AV_OPT_TYPE_FLAG_ARRAY, .max = INT_MAX, .flags = FLAGS },
    { "colorranges", "array of supported color ranges", OFFSET(colorranges),
        AV_OPT_TYPE_INT | AV_OPT_TYPE_FLAG_ARRAY, .max = INT_MAX, .flags = FLAGS },

    { NULL },
};
#undef FLAGS
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
static const AVOption abuffersink_options[] = {
    { "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
    { "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS },
#if FF_API_BUFFERSINK_OPTS
    { "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS | AV_OPT_FLAG_DEPRECATED },
    { "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS | AV_OPT_FLAG_DEPRECATED },
    { "ch_layouts", "set a '|'-separated list of supported channel layouts",
        OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
        OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = FLAGS | AV_OPT_FLAG_DEPRECATED },
    { "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS | AV_OPT_FLAG_DEPRECATED },
#endif

    { "sample_formats", "array of supported sample formats", OFFSET(sample_formats),
        AV_OPT_TYPE_SAMPLE_FMT | AV_OPT_TYPE_FLAG_ARRAY, .max = INT_MAX, .flags = FLAGS },
    { "samplerates", "array of supported sample formats", OFFSET(samplerates),
        AV_OPT_TYPE_INT | AV_OPT_TYPE_FLAG_ARRAY, .max = INT_MAX, .flags = FLAGS },
    { "channel_layouts", "array of supported channel layouts", OFFSET(channel_layouts),
        AV_OPT_TYPE_CHLAYOUT | AV_OPT_TYPE_FLAG_ARRAY, .flags = FLAGS },
    { NULL },
};
#undef FLAGS
@ -360,7 +509,7 @@ const AVFilter ff_vsink_buffer = {
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size = sizeof(BufferSinkContext),
    .priv_class = &buffersink_class,
    .init = common_init,
    .init = init_video,
    .uninit = uninit,
    .activate = activate,
    FILTER_INPUTS(ff_video_default_filterpad),
@ -368,15 +517,23 @@ const AVFilter ff_vsink_buffer = {
    FILTER_QUERY_FUNC(vsink_query_formats),
};

static const AVFilterPad inputs_audio[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input_audio,
    },
};

const AVFilter ff_asink_abuffer = {
    .name = "abuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .priv_class = &abuffersink_class,
    .priv_size = sizeof(BufferSinkContext),
    .init = common_init,
    .init = init_audio,
    .uninit = uninit,
    .activate = activate,
    FILTER_INPUTS(ff_audio_default_filterpad),
    FILTER_INPUTS(inputs_audio),
    .outputs = NULL,
    FILTER_QUERY_FUNC(asink_query_formats),
};
@ -54,20 +54,13 @@
 *
 * The format can be constrained by setting options, using av_opt_set() and
 * related functions with the AV_OPT_SEARCH_CHILDREN flag.
 *  - pix_fmts (int list),
 *  - color_spaces (int list),
 *  - color_ranges (int list),
 *  - sample_fmts (int list),
 *  - sample_rates (int list),
 *  - ch_layouts (string),
 *  - channel_counts (int list),
 *  - all_channel_counts (bool).
 * Most of these options are of type binary, and should be set using
 * av_opt_set_int_list() or av_opt_set_bin(). If they are not set, all
 * corresponding formats are accepted.
 *
 * As a special case, if ch_layouts is not set, all valid channel layouts are
 * accepted except for UNSPEC layouts, unless all_channel_counts is set.
 *  - pixel_formats (array of pixel formats),
 *  - colorspaces (array of int),
 *  - colorranges (array of int),
 *  - sample_formats (array of sample formats),
 *  - samplerates (array of int),
 *  - channel_layouts (array of channel layouts)
 * If an option is not set, all corresponding formats are accepted.
 */

/**
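
The hunk above replaces the old binary buffersink options (pix_fmts, sample_fmts, ...) set via av_opt_set_int_list()/av_opt_set_bin() with array-valued options (pixel_formats, sample_formats, ...). A minimal sketch of constraining a video buffersink through the new options; it assumes the array options accept a comma-separated value string via av_opt_set(), the format list is only an example, and error handling is trimmed:

/*
 * Illustrative sketch, not part of the diff: create a buffersink and limit
 * negotiation to a couple of pixel formats using the new "pixel_formats"
 * array option instead of the deprecated binary "pix_fmts" option.
 */
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavutil/opt.h>

static int make_video_sink(AVFilterGraph *graph, AVFilterContext **sink)
{
    int ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("buffersink"),
                                           "out", NULL, NULL, graph);
    if (ret < 0)
        return ret;

    /* Accept only these pixel formats during format negotiation. */
    return av_opt_set(*sink, "pixel_formats", "yuv420p,nv12",
                      AV_OPT_SEARCH_CHILDREN);
}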
@ -627,7 +627,8 @@ int avfilter_graph_segment_init(AVFilterGraphSegment *seg, int flags)

        if (p->filter_name)
            return fail_creation_pending(seg, p->filter_name, __func__);
        if (!p->filter || fffilterctx(p->filter)->initialized)
        if (!p->filter ||
            (fffilterctx(p->filter)->state_flags & AV_CLASS_STATE_INITIALIZED))
            continue;

        ret = avfilter_init_dict(p->filter, NULL);
@ -28,18 +28,19 @@ void ff_metal_compute_encoder_dispatch(id<MTLDevice> device,
    NSUInteger w = pipeline.threadExecutionWidth;
    NSUInteger h = pipeline.maxTotalThreadsPerThreadgroup / w;
    MTLSize threadsPerThreadgroup = MTLSizeMake(w, h, 1);
    BOOL fallback = YES;
    // MAC_OS_X_VERSION_10_15 is only defined on SDKs new enough to include its functionality (including iOS, tvOS, etc)
#ifdef MAC_OS_X_VERSION_10_15
    if (@available(macOS 10.15, iOS 11, tvOS 14.5, *)) {
        if ([device supportsFamily:MTLGPUFamilyCommon3]) {
            MTLSize threadsPerGrid = MTLSizeMake(width, height, 1);
            [encoder dispatchThreads:threadsPerGrid threadsPerThreadgroup:threadsPerThreadgroup];
            fallback = NO;
            return;
        }
    }
#endif
    if (fallback) {

    // Fallback path, if we took the above one we already returned so none of this is reached
    {
        MTLSize threadgroups = MTLSizeMake((width + w - 1) / w,
                                           (height + h - 1) / h,
                                           1);
submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavfilter/opencl/.gitignore (vendored, normal file, 1 line)
@ -0,0 +1 @@
*.c
submodules/ffmpeg/Sources/FFMpeg/ffmpeg-7.1/libavfilter/tests/.gitignore (vendored, normal file, 12 lines)
@ -0,0 +1,12 @@
/dnn-layer-conv2d
/dnn-layer-depth2space
/dnn-layer-maximum
/dnn-layer-pad
/dnn-layer-mathbinary
/dnn-layer-mathunary
/dnn-layer-avgpool
/dnn-layer-dense
/drawutils
/filtfmts
/formats
/integral
@ -26,22 +26,17 @@
#include "formats.h"
#include "vaapi_vpp.h"

int ff_vaapi_vpp_query_formats(AVFilterContext *avctx)
int ff_vaapi_vpp_query_formats(const AVFilterContext *avctx,
                               AVFilterFormatsConfig **cfg_in,
                               AVFilterFormatsConfig **cfg_out)
{
    enum AVPixelFormat pix_fmts[] = {
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
    };
    int err;

    if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
                              &avctx->inputs[0]->outcfg.formats)) < 0)
        return err;
    if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
                              &avctx->outputs[0]->incfg.formats)) < 0)
        return err;

    if ((err = ff_set_common_all_color_spaces(avctx)) < 0 ||
        (err = ff_set_common_all_color_ranges(avctx)) < 0)
    err = ff_set_common_formats_from_list2(avctx, cfg_in, cfg_out, pix_fmts);
    if (err < 0)
        return err;

    return 0;
Some files were not shown because too many files have changed in this diff.