\n", argv[0]);
- return 1;
- }
-
- av_register_all();
- avfilter_register_all();
-
- if ((ret = open_input_file(argv[1])) < 0)
- goto end;
- if ((ret = open_output_file(argv[2])) < 0)
- goto end;
- if ((ret = init_filters()) < 0)
- goto end;
-
- /* read all packets */
- while (1) {
- if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
- break;
- stream_index = packet.stream_index;
- type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
- av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
- stream_index);
-
- if (filter_ctx[stream_index].filter_graph) {
- av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
- frame = av_frame_alloc();
- if (!frame) {
- ret = AVERROR(ENOMEM);
- break;
- }
- av_packet_rescale_ts(&packet,
- ifmt_ctx->streams[stream_index]->time_base,
- ifmt_ctx->streams[stream_index]->codec->time_base);
- dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
- avcodec_decode_audio4;
- ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
- &got_frame, &packet);
- if (ret < 0) {
- av_frame_free(&frame);
- av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
- break;
- }
-
- if (got_frame) {
- frame->pts = av_frame_get_best_effort_timestamp(frame);
- ret = filter_encode_write_frame(frame, stream_index);
- av_frame_free(&frame);
- if (ret < 0)
- goto end;
- } else {
- av_frame_free(&frame);
- }
- } else {
- /* remux this frame without reencoding */
- av_packet_rescale_ts(&packet,
- ifmt_ctx->streams[stream_index]->time_base,
- ofmt_ctx->streams[stream_index]->time_base);
-
- ret = av_interleaved_write_frame(ofmt_ctx, &packet);
- if (ret < 0)
- goto end;
- }
- av_free_packet(&packet);
- }
-
- /* flush filters and encoders */
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- /* flush filter */
- if (!filter_ctx[i].filter_graph)
- continue;
- ret = filter_encode_write_frame(NULL, i);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
- goto end;
- }
-
- /* flush encoder */
- ret = flush_encoder(i);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
- goto end;
- }
- }
-
- av_write_trailer(ofmt_ctx);
-end:
- av_free_packet(&packet);
- av_frame_free(&frame);
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- avcodec_close(ifmt_ctx->streams[i]->codec);
- if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
- avcodec_close(ofmt_ctx->streams[i]->codec);
- if (filter_ctx && filter_ctx[i].filter_graph)
- avfilter_graph_free(&filter_ctx[i].filter_graph);
- }
- av_free(filter_ctx);
- avformat_close_input(&ifmt_ctx);
- if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
- avio_closep(&ofmt_ctx->pb);
- avformat_free_context(ofmt_ctx);
-
- if (ret < 0)
- av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
-
- return ret ? 1 : 0;
-}
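
The loop above was written against the old avcodec_decode_video2()/avcodec_decode_audio4() API, which has since been removed from FFmpeg. As a rough sketch (not part of the original example), the same decode step with the current send/receive API could look like this; handle_frame() is a hypothetical stand-in for the example's filter-and-encode call:

/* Sketch only: modern replacement for the dec_func() call above. */
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

extern int handle_frame(AVFrame *frame);      /* hypothetical consumer */

static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = avcodec_send_packet(dec_ctx, pkt);   /* pkt == NULL flushes */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0;
            break;
        }
        if (ret < 0)
            break;                             /* real decoding error */
        frame->pts = frame->best_effort_timestamp;
        ret = handle_frame(frame);             /* e.g. filter + encode */
        av_frame_unref(frame);
    }
    av_frame_free(&frame);
    return ret;
}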
diff --git a/Externals/ffmpeg/dev/doc/faq.html b/Externals/ffmpeg/dev/doc/faq.html
deleted file mode 100644
index be6ea62995..0000000000
--- a/Externals/ffmpeg/dev/doc/faq.html
+++ /dev/null
@@ -1,719 +0,0 @@
-
- FFmpeg FAQ
-
1 General Questions
-
-
-
1.1 Why doesn’t FFmpeg support feature [xyz]?# TOC
-
-
Because no one has taken on that task yet. FFmpeg development is
-driven by the tasks that are important to the individual developers.
-If there is a feature that is important to you, the best way to get
-it implemented is to undertake the task yourself or sponsor a developer.
-
-
-
1.2 FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?# TOC
-
-
No. Windows DLLs are not portable, bloated and often slow.
-Moreover FFmpeg strives to support all codecs natively.
-A DLL loader is not conducive to that goal.
-
-
-
1.3 I cannot read this file although this format seems to be supported by ffmpeg.# TOC
-
-
Even if ffmpeg can read the container format, it may not support all its
-codecs. Please consult the supported codec list in the ffmpeg
-documentation.
-
-
-
1.4 Which codecs are supported by Windows?# TOC
-
-
Windows does not support standard formats like MPEG very well, unless you
-install some additional codecs.
-
-
The following list of video codecs should work on most Windows systems:
-
-msmpeg4v2
-.avi/.asf
-
-msmpeg4
-.asf only
-
-wmv1
-.asf only
-
-wmv2
-.asf only
-
-mpeg4
-Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
-
-mpeg1video
-.mpg only
-
-
-
Note, ASF files often have .wmv or .wma extensions in Windows. It should also
-be mentioned that Microsoft claims a patent on the ASF format, and may sue
-or threaten users who create ASF files with non-Microsoft software. It is
-strongly advised to avoid ASF where possible.
-
-
The following list of audio codecs should work on most Windows systems:
-
-adpcm_ima_wav
-adpcm_ms
-pcm_s16le
-always
-
-libmp3lame
-If some MP3 codec like LAME is installed.
-
-
-
-
-
-
2 Compilation
-
-
-
2.1 error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'
-
-
This is a bug in gcc. Do not report it to us. Instead, please report it to
-the gcc developers. Note that we will not add workarounds for gcc bugs.
-
-
Also note that (some of) the gcc developers believe this is not a bug or
-not a bug they should fix:
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203 .
-Then again, some of them do not know the difference between an undecidable
-problem and an NP-hard problem...
-
-
-
2.2 I have installed this library with my distro’s package manager. Why does configure not see it?
-
-
Distributions usually split libraries in several packages. The main package
-contains the files necessary to run programs using the library. The
-development package contains the files necessary to build programs using the
-library. Sometimes, docs and/or data are in a separate package too.
-
-
To build FFmpeg, you need to install the development package. It is usually
-called libfoo-dev or libfoo-devel . You can remove it after the
-build is finished, but be sure to keep the main package.
-
-
-
2.3 How do I make pkg-config find my libraries?
-
-
Somewhere along with your libraries, there is a .pc file (or several)
-in a pkgconfig directory. You need to set environment variables to
-point pkg-config to these files.
-
-
If you need to add directories to pkg-config’s search list
-(typical use case: library installed separately), add it to
-$PKG_CONFIG_PATH:
-
-
-
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
-
-
-
If you need to replace pkg-config’s search list
-(typical use case: cross-compiling), set it in
-$PKG_CONFIG_LIBDIR:
-
-
-
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
-
-
-
If you need to know the library’s internal dependencies (typical use: static
-linking), add the --static option to pkg-config:
-
-
-
./configure --pkg-config-flags=--static
-
-
-
-
2.4 How do I use pkg-config when cross-compiling?
-
-
The best way is to install pkg-config in your cross-compilation
-environment. It will automatically use the cross-compilation libraries.
-
-
You can also use pkg-config from the host environment by
-specifying explicitly --pkg-config=pkg-config to configure.
-In that case, you must point pkg-config to the correct directories
-using PKG_CONFIG_LIBDIR, as explained in the previous entry.
-
-
As an intermediate solution, you can place in your cross-compilation
-environment a script that calls the host pkg-config with
-PKG_CONFIG_LIBDIR set. That script can look like this:
-
-
-
#!/bin/sh
-PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
-export PKG_CONFIG_LIBDIR
-exec /usr/bin/pkg-config "$@"
-
-
-
-
-
-
3 Usage
-
3.1 ffmpeg does not work; what is wrong?
-
-
Try a make distclean in the ffmpeg source directory before the build.
-If this does not help see
-(http://ffmpeg.org/bugreports.html ).
-
-
-
3.2 How do I encode single pictures into movies?# TOC
-
-
First, rename your pictures to follow a numerical sequence.
-For example, img1.jpg, img2.jpg, img3.jpg,...
-Then you may run:
-
-
-
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
-
-
-
Notice that ‘%d ’ is replaced by the image number.
-
-
img%03d.jpg means the sequence img001.jpg , img002.jpg , etc.
-
-
Use the -start_number option to declare a starting number for
-the sequence. This is useful if your sequence does not start with
-img001.jpg but is still in a numerical order. The following
-example will start with img100.jpg :
-
-
-
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
-
-
-
If you have large number of pictures to rename, you can use the
-following command to ease the burden. The command, using the bourne
-shell syntax, symbolically links all files in the current directory
-that match *jpg
to the /tmp directory in the sequence of
-img001.jpg , img002.jpg and so on.
-
-
-
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
-
-
-
If you want to sequence them by oldest modified first, substitute
-$(ls -r -t *jpg)
in place of *jpg
.
-
-
Then run:
-
-
-
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
-
-
-
The same logic is used for any image format that ffmpeg reads.
-
-
You can also use cat
to pipe images to ffmpeg:
-
-
-
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
-
-
-
-
3.3 How do I encode movie to single pictures?# TOC
-
-
Use:
-
-
-
ffmpeg -i movie.mpg movie%d.jpg
-
-
-
The movie.mpg used as input will be converted to
-movie1.jpg , movie2.jpg , etc...
-
-
Instead of relying on file format self-recognition, you may also use
-
--c:v ppm
--c:v png
--c:v mjpeg
-
-
to force the encoding.
-
-
Applying that to the previous example:
-
-
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
-
-
-
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
-
-
-
3.4 Why do I see a slight quality degradation with multithreaded MPEG* encoding?# TOC
-
-
For multithreaded MPEG* encoding, the encoded slices must be independent,
-otherwise thread n would practically have to wait for n-1 to finish, so it’s
-quite logical that there is a small reduction of quality. This is not a bug.
-
-
-
3.5 How can I read from the standard input or write to the standard output?# TOC
-
-
Use - as file name.
-
-
-
3.6 -f jpeg doesn’t work.# TOC
-
-
Try ’-f image2 test%d.jpg’.
-
-
-
3.7 Why can I not change the frame rate?# TOC
-
-
Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
-Choose a different codec with the -c:v command line option.
-
-
-
3.8 How do I encode Xvid or DivX video with ffmpeg?# TOC
-
-
Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
-standard (note that there are many other coding formats that use this
-same standard). Thus, use ’-c:v mpeg4’ to encode in these formats. The
-default fourcc stored in an MPEG-4-coded file will be ’FMP4’. If you want
-a different fourcc, use the ’-vtag’ option. E.g., ’-vtag xvid’ will
-force the fourcc ’xvid’ to be stored as the video fourcc rather than the
-default.
-
-
-
3.9 Which are good parameters for encoding high quality MPEG-4?# TOC
-
-
’-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2’,
-things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
-
-
-
3.10 Which are good parameters for encoding high quality MPEG-1/MPEG-2?# TOC
-
-
’-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2’
-but beware the ’-g 100’ might cause problems with some decoders.
-Things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd.
-
-
-
3.11 Interlaced video looks very bad when encoded with ffmpeg, what is wrong?# TOC
-
-
You should use ’-flags +ilme+ildct’ and maybe ’-flags +alt’ for interlaced
-material, and try ’-top 0/1’ if the result looks really messed-up.
-
-
-
3.12 How can I read DirectShow files?# TOC
-
-
If you have built FFmpeg with ./configure --enable-avisynth
-(only possible on MinGW/Cygwin platforms),
-then you may use any file that DirectShow can read as input.
-
-
Just create an "input.avs" text file with this single line ...
-
-
DirectShowSource("C:\path to your file\yourfile.asf")
-
-
... and then feed that text file to ffmpeg:
-
-
ffmpeg -i input.avs
-
For ANY other help on AviSynth, please visit the
-AviSynth homepage .
-
-
-
3.13 How can I join video files?# TOC
-
-
To "join" video files is quite ambiguous. The following list explains the
-different kinds of "joining" and points out how those are addressed in
-FFmpeg. To join video files may mean:
-
-
- To put them one after the other: this is called to concatenate them
-(in short: concat) and is addressed
-in this very faq .
-
- To put them together in the same file, to let the user choose between the
-different versions (example: different audio languages): this is called to
-multiplex them together (in short: mux), and is done by simply
-invoking ffmpeg with several -i options.
-
- For audio, to put all channels together in a single stream (example: two
-mono streams into one stereo stream): this is sometimes called to
-merge them, and can be done using the
-amerge
filter.
-
- For audio, to play one on top of the other: this is called to mix
-them, and can be done by first merging them into a single stream and then
-using the pan
filter to mix
-the channels at will.
-
- For video, to display both together, side by side or one on top of a part of
-the other; it can be done using the
-overlay
video filter.
-
-
-
-
-
3.14 How can I concatenate video files?# TOC
-
-
There are several solutions, depending on the exact circumstances.
-
-
-
3.14.1 Concatenating using the concat filter # TOC
-
-
FFmpeg has a concat
filter designed specifically for that, with examples in the
-documentation. This operation is recommended if you need to re-encode.
-
-
-
3.14.2 Concatenating using the concat demuxer # TOC
-
-
FFmpeg has a concat
demuxer which you can use when you want to avoid a re-encode and
-your format doesn’t support file level concatenation.
-
-
-
3.14.3 Concatenating using the concat protocol (file level)# TOC
-
-
FFmpeg has a concat
protocol designed specifically for that, with examples in the
-documentation.
-
-
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow you to concatenate
-video by merely concatenating the files containing them.
-
-
Hence you may concatenate your multimedia files by first transcoding them to
-these privileged formats, then using the humble cat
command (or the
-equally humble copy
under Windows), and finally transcoding back to your
-format of choice.
-
-
-
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
-ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
-cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
-ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
-
-
-
Additionally, you can use the concat
protocol instead of cat
or
-copy
which will avoid creation of a potentially huge intermediate file.
-
-
-
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
-ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
-ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
-ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
-
-
-
Note that you may need to escape the character "|" which is special for many
-shells.
-
-
Another option is usage of named pipes, should your platform support it:
-
-
-
mkfifo intermediate1.mpg
-mkfifo intermediate2.mpg
-ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
-ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
-cat intermediate1.mpg intermediate2.mpg |\
-ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
-
-
-
-
3.14.4 Concatenating using raw audio and video# TOC
-
-
Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also
-allow concatenation, and the transcoding step is almost lossless.
-When using multiple yuv4mpegpipe(s), the first line needs to be discarded
-from all but the first stream. This can be accomplished by piping through
-tail
as seen below. Note that when piping through tail
you
-must use command grouping, { ;}
, to background properly.
-
-
For example, let’s say we want to concatenate two FLV files into an
-output.flv file:
-
-
-
mkfifo temp1.a
-mkfifo temp1.v
-mkfifo temp2.a
-mkfifo temp2.v
-mkfifo all.a
-mkfifo all.v
-ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
-ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
-ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
-{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; } &
-cat temp1.a temp2.a > all.a &
-cat temp1.v temp2.v > all.v &
-ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
- -f yuv4mpegpipe -i all.v \
- -y output.flv
-rm temp[12].[av] all.[av]
-
-
-
-
3.15 Using -f lavfi , audio becomes mono for no apparent reason.# TOC
-
-
Use -dumpgraph - to find out exactly where the channel layout is
-lost.
-
-
Most likely, it is through auto-inserted aresample
. Try to understand
-why the converting filter was needed at that place.
-
-
Just before the output is a likely place, as -f lavfi currently
-only supports packed S16.
-
-
Then insert the correct aformat
explicitly in the filtergraph,
-specifying the exact format.
-
-
-
aformat=sample_fmts=s16:channel_layouts=stereo
-
-
-
-
3.16 Why does FFmpeg not see the subtitles in my VOB file?# TOC
-
-
VOB and a few other formats do not have a global header that describes
-everything present in the file. Instead, applications are supposed to scan
-the file to see what it contains. Since VOB files are frequently large, only
-the beginning is scanned. If the subtitles happen only later in the file,
-they will not be initially detected.
-
-
Some applications, including the ffmpeg
command-line tool, can only
-work with streams that were detected during the initial scan; streams that
-are detected later are ignored.
-
-
The size of the initial scan is controlled by two options: probesize
-(default ~5 MB) and analyzeduration (default 5,000,000 µs = 5 s). For
-the subtitle stream to be detected, both values must be large enough.
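
As a hedged illustration (not part of the original FAQ), the same two options can also be raised from C through the demuxer options dictionary; open_with_big_probe() and the 50M values are made-up examples:

/* Sketch: enlarge the initial scan so late-starting streams (e.g. VOB
 * subtitles) are detected. Error handling is trimmed. */
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int open_with_big_probe(AVFormatContext **fmt, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "probesize", "50000000", 0);        /* bytes */
    av_dict_set(&opts, "analyzeduration", "50000000", 0);  /* microseconds */

    ret = avformat_open_input(fmt, url, NULL, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    /* The scan itself happens here, now with the larger limits. */
    return avformat_find_stream_info(*fmt, NULL);
}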
-
-
-
3.17 Why was the ffmpeg -sameq option removed? What to use instead?
-
-
The -sameq option meant "same quantizer", and made sense only in a
-very limited set of cases. Unfortunately, a lot of people mistook it for
-"same quality" and used it in places where it did not make sense: it had
-roughly the expected visible effect, but achieved it in a very inefficient
-way.
-
-
Each encoder has its own set of options to set the quality-vs-size balance,
-use the options for the encoder you are using to set the quality level to a
-point acceptable for your tastes. The most common options to do that are
--qscale and -qmax , but you should peruse the documentation
-of the encoder you chose.
-
-
-
4 Development
-
-
-
4.1 Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?# TOC
-
-
Yes. Check the doc/examples directory in the source
-repository, also available online at:
-https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples .
-
-
Examples are also installed by default, usually in
-$PREFIX/share/ffmpeg/examples
.
-
-
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
-examine the source code for one of the many open source projects that
-already incorporate FFmpeg at (projects.html ).
-
-
-
4.2 Can you support my C compiler XXX?# TOC
-
-
It depends. If your compiler is C99-compliant, then patches to support
-it are likely to be welcome if they do not pollute the source code
-with #ifdef
s related to the compiler.
-
-
-
4.3 Is Microsoft Visual C++ supported?# TOC
-
-
Yes. Please see the Microsoft Visual C++
-section in the FFmpeg documentation.
-
-
-
4.4 Can you add automake, libtool or autoconf support?# TOC
-
-
No. These tools are too bloated and they complicate the build.
-
-
-
4.5 Why not rewrite FFmpeg in object-oriented C++?# TOC
-
-
FFmpeg is already organized in a highly modular manner and does not need to
-be rewritten in a formal object language. Further, many of the developers
-favor straight C; it works for them. For more arguments on this matter,
-read "Programming Religion" .
-
-
-
4.6 Why are the ffmpeg programs devoid of debugging symbols?# TOC
-
-
The build process creates ffmpeg_g, ffplay_g, etc., which
-contain full debug information. Those binaries are stripped to create
-ffmpeg, ffplay, etc. If you need the debug information, use
-the *_g versions.
-
-
-
4.7 I do not like the LGPL, can I contribute code under the GPL instead?# TOC
-
-
Yes, as long as the code is optional and can easily and cleanly be placed
-under #if CONFIG_GPL without breaking anything. So, for example, a new codec
-or filter would be OK under GPL while a bug fix to LGPL code would not.
-
-
-
4.8 I’m using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.# TOC
-
-
FFmpeg builds static libraries by default. In static libraries, dependencies
-are not handled. That has two consequences. First, you must specify the
-libraries in dependency order: -lavdevice must come before
--lavformat, -lavutil must come after everything else, etc.
-Second, external libraries that are used in FFmpeg have to be specified too.
-
-
An easy way to get the full list of required libraries in dependency order
-is to use pkg-config.
-
-
-
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
-
-
-
See doc/example/Makefile and doc/example/pc-uninstalled for
-more details.
-
-
-
4.9 I’m using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.# TOC
-
-
FFmpeg is a pure C project, so to use the libraries within your C++ application
-you need to explicitly state that you are using a C library. You can do this by
-encompassing your FFmpeg includes using extern "C".
-
-
See http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3
-
-
-
4.10 I’m using libavutil from within my C++ application but the compiler complains about ’UINT64_C’ was not declared in this scope# TOC
-
-
FFmpeg is a pure C project using C99 math features; in order to enable C++
-to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS.
-
-
-
4.11 I have a file in memory / an API different from *open/*read libc, how do I use it with libavformat?
-
-
You have to create a custom AVIOContext using avio_alloc_context;
-see libavformat/aviobuf.c in FFmpeg and libmpdemux/demux_lavf.c in MPlayer or MPlayer2 sources.
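
A minimal sketch of that approach, assuming a complete file image already sits in memory; mem_reader, read_packet() and open_from_memory() are illustrative names, and error handling/cleanup are trimmed:

#include <libavformat/avformat.h>
#include <libavutil/mem.h>
#include <string.h>

struct mem_reader {
    const uint8_t *data;
    size_t size, pos;
};

static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_reader *r = opaque;
    size_t left = r->size - r->pos;

    if (!left)
        return AVERROR_EOF;
    if ((size_t)buf_size > left)
        buf_size = (int)left;
    memcpy(buf, r->data + r->pos, buf_size);
    r->pos += buf_size;
    return buf_size;
}

int open_from_memory(AVFormatContext **fmt, const uint8_t *data, size_t size)
{
    struct mem_reader *r = av_mallocz(sizeof(*r));
    uint8_t *io_buf = av_malloc(4096);
    AVIOContext *avio;

    r->data = data;
    r->size = size;
    avio = avio_alloc_context(io_buf, 4096, 0, r, read_packet, NULL, NULL);

    *fmt = avformat_alloc_context();
    (*fmt)->pb = avio;                       /* hand the custom I/O to libavformat */
    (*fmt)->flags |= AVFMT_FLAG_CUSTOM_IO;   /* caller owns avio, not libavformat */
    return avformat_open_input(fmt, NULL, NULL, NULL);
}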
-
-
-
4.12 Where is the documentation about ffv1, msmpeg4, asv1, 4xm?# TOC
-
-
see http://www.ffmpeg.org/~michael/
-
-
-
4.13 How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?# TOC
-
-
Even if peculiar since it is network oriented, RTP is a container like any
-other. You have to demux RTP before feeding the payload to libavcodec.
-In this specific case please look at RFC 4629 to see how it should be done.
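
A rough sketch of that route, assuming the session is described by an out-of-band session.sdp file and letting libavformat's RTP demuxer do the depacketizing (read_rtp_session() is an illustrative name; recent FFmpeg versions also want the protocols whitelisted):

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int read_rtp_session(const char *sdp_path)
{
    AVFormatContext *fmt = NULL;
    AVDictionary *opts = NULL;
    AVPacket *pkt;
    int ret;

    /* Whitelist the protocols referenced by the SDP. */
    av_dict_set(&opts, "protocol_whitelist", "file,udp,rtp", 0);

    ret = avformat_open_input(&fmt, sdp_path, NULL, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;
    avformat_find_stream_info(fmt, NULL);

    /* Each packet now carries a depacketized elementary-stream payload,
     * ready for the matching libavcodec decoder. */
    pkt = av_packet_alloc();
    while (av_read_frame(fmt, pkt) >= 0)
        av_packet_unref(pkt);

    av_packet_free(&pkt);
    avformat_close_input(&fmt);
    return 0;
}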
-
-
-
4.14 AVStream.r_frame_rate is wrong, it is much larger than the frame rate.# TOC
-
-
r_frame_rate is NOT the average frame rate; it is the smallest frame rate
-that can accurately represent all timestamps. So no, it is not
-wrong if it is larger than the average!
-For example, if you have mixed 25 and 30 fps content, then r_frame_rate
-will be 150 (it is the least common multiple).
-If you are looking for the average frame rate, see AVStream.avg_frame_rate.
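
A tiny sketch (dump_frame_rates() is just an illustrative helper) that prints both rates for an already-opened input, so the difference is easy to see:

#include <stdio.h>
#include <libavformat/avformat.h>

void dump_frame_rates(const AVFormatContext *fmt)
{
    for (unsigned i = 0; i < fmt->nb_streams; i++) {
        const AVStream *st = fmt->streams[i];
        printf("stream %u: r_frame_rate %d/%d, avg_frame_rate %d/%d\n",
               i,
               st->r_frame_rate.num, st->r_frame_rate.den,
               st->avg_frame_rate.num, st->avg_frame_rate.den);
    }
}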
-
-
-
4.15 Why is make fate not running all tests?
-
-
Make sure you have the fate-suite samples and the SAMPLES Make variable
-or FATE_SAMPLES environment variable or the --samples
-configure option is set to the right path.
-
-
-
4.16 Why is make fate not finding the samples?
-
-
Do you happen to have a ~ character in the samples path to indicate a
-home directory? The value is used in ways where the shell cannot expand it,
-causing FATE to not find files. Just replace ~ by the full path.
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/fate.html b/Externals/ffmpeg/dev/doc/fate.html
deleted file mode 100644
index 980d28b756..0000000000
--- a/Externals/ffmpeg/dev/doc/fate.html
+++ /dev/null
@@ -1,286 +0,0 @@
-
- FFmpeg Automated Testing Environment
-
1 Introduction
-
-
FATE is an extended regression suite on the client-side and a means
-for results aggregation and presentation on the server-side.
-
-
The first part of this document explains how you can use FATE from
-your FFmpeg source directory to test your ffmpeg binary. The second
-part describes how you can run FATE to submit the results to FFmpeg’s
-FATE server.
-
-
In any case you can have a look at the publicly viewable FATE results
-by visiting this website:
-
-
http://fate.ffmpeg.org/
-
-
This is especially recommended for all people contributing source
-code to FFmpeg, as it can be seen if some test on some platform broke
-with their recent contribution. This usually happens on the platforms
-the developers could not test on.
-
-
The second part of this document describes how you can run FATE to
-submit your results to FFmpeg’s FATE server. If you want to submit your
-results be sure to check that your combination of CPU, OS and compiler
-is not already listed on the above mentioned website.
-
-
In the third part you can find a comprehensive listing of FATE makefile
-targets and variables.
-
-
-
-
2 Using FATE from your FFmpeg source directory
-
-
If you want to run FATE on your machine you need to have the samples
-in place. You can get the samples via the build target fate-rsync.
-Use this command from the top-level source directory:
-
-
-
make fate-rsync SAMPLES=fate-suite/
-make fate SAMPLES=fate-suite/
-
-
-
The above commands set the samples location by passing a makefile
-variable via command line. It is also possible to set the samples
-location at source configuration time by invoking configure with
-‘--samples=<path to the samples directory>’. Afterwards you can
-invoke the makefile targets without setting the SAMPLES makefile
-variable. This is illustrated by the following commands:
-
-
-
./configure --samples=fate-suite/
-make fate-rsync
-make fate
-
-
-
Yet another way to tell FATE about the location of the sample
-directory is by making sure the environment variable FATE_SAMPLES
-contains the path to your samples directory. This can be achieved
-by e.g. putting that variable in your shell profile or by setting
-it in your interactive session.
-
-
-
FATE_SAMPLES=fate-suite/ make fate
-
-
-
-
Do not put a ’~’ character in the samples path to indicate a home
-directory. Because of shell nuances, this will cause FATE to fail.
-
-
To use a custom wrapper to run the test, pass --target-exec to
-configure or set the TARGET_EXEC Make variable.
-
-
-
-
3 Submitting the results to the FFmpeg result aggregation server
-
-
To submit your results to the server you should run fate through the
-shell script tests/fate.sh from the FFmpeg sources. This script needs
-to be invoked with a configuration file as its first argument.
-
-
-
tests/fate.sh /path/to/fate_config
-
-
-
A configuration file template with comments describing the individual
-configuration variables can be found at doc/fate_config.sh.template .
-
-
The mentioned configuration template is also available here:
-
slot= # some unique identifier
-repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
-samples= # path to samples directory
-workdir= # directory in which to do all the work
-#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
-comment= # optional description
-build_only= # set to "yes" for a compile-only instance that skips tests
-
-# the following are optional and map to configure options
-arch=
-cpu=
-cross_prefix=
-as=
-cc=
-ld=
-target_os=
-sysroot=
-target_exec=
-target_path=
-target_samples=
-extra_cflags=
-extra_ldflags=
-extra_libs=
-extra_conf= # extra configure options not covered above
-
-#make= # name of GNU make if not 'make'
-makeopts= # extra options passed to 'make'
-#tar= # command to create a tar archive from its arguments on stdout,
- # defaults to 'tar c'
-
-
Create a configuration that suits your needs, based on the configuration
-template. The ‘slot’ configuration variable can be any string that is not
-yet used, but it is suggested that you name it adhering to the following
-pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
-itself will be sourced in a shell script, therefore all shell features may
-be used. This enables you to setup the environment as you need it for your
-build.
-
-
For your first test runs the ‘fate_recv’ variable should be empty or
-commented out. This will run everything as normal except that it will omit
-the submission of the results to the server. The following files should be
-present in $workdir as specified in the configuration file:
-
-
- configure.log
- compile.log
- test.log
- report
- version
-
-
-
When you have everything working properly you can create an SSH key pair
-and send the public key to the FATE server administrator who can be contacted
-at the email address fate-admin@ffmpeg.org .
-
-
Configure your SSH client to use public key authentication with that key
-when connecting to the FATE server. Also do not forget to check the identity
-of the server and to accept its host key. This can usually be achieved by
-running your SSH client manually and killing it after you accepted the key.
-The FATE server’s fingerprint is:
-
-
-RSA
-d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
-
-ECDSA
-76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
-
-
-
-
If you have problems connecting to the FATE server, it may help to try out
-the ssh
command with one or more -v options. You should
-get detailed output concerning your SSH configuration and the authentication
-process.
-
-
The only thing left is to automate the execution of the fate.sh script and
-the synchronisation of the samples directory.
-
-
-
-
4 FATE makefile targets and variables
-
-
-
4.1 Makefile targets# TOC
-
-
-fate-rsync
-Download/synchronize sample files to the configured samples directory.
-
-
-fate-list
-Will list all fate/regression test targets.
-
-
-fate
-Run the FATE test suite (requires the fate-suite dataset).
-
-
-
-
-
4.2 Makefile variables# TOC
-
-
-V
-Verbosity level, can be set to 0, 1 or 2.
-
- 0: show just the test arguments
- 1: show just the command used in the test
- 2: show everything
-
-
-
-SAMPLES
-Specify or override the path to the FATE samples at make time, it has a
-meaning only while running the regression tests.
-
-
-THREADS
-Specify how many threads to use while running regression tests, it is
-quite useful to detect thread-related regressions.
-
-
-THREAD_TYPE
-Specify which threading strategy to test, either slice or frame;
-by default slice+frame.
-
-
-CPUFLAGS
-Specify CPU flags.
-
-
-TARGET_EXEC
-Specify or override the wrapper used to run the tests.
-The TARGET_EXEC option provides a way to run FATE wrapped in
-valgrind, qemu-user or wine, or on remote targets
-through ssh.
-
-
-GEN
-Set to 1 to generate the missing or mismatched references.
-
-
-
-
-
4.3 Examples# TOC
-
-
-
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-all.html b/Externals/ffmpeg/dev/doc/ffmpeg-all.html
deleted file mode 100644
index dc4fc35617..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-all.html
+++ /dev/null
@@ -1,27303 +0,0 @@
-
- ffmpeg Documentation
-
1 Synopsis
-
-
ffmpeg [global_options ] {[input_file_options ] -i input_file } ... {[output_file_options ] output_file } ...
-
-
-
2 Description
-
-
ffmpeg is a very fast video and audio converter that can also grab from
-a live audio/video source. It can also convert between arbitrary sample
-rates and resize video on the fly with a high quality polyphase filter.
-
-
ffmpeg reads from an arbitrary number of input "files" (which can be regular
-files, pipes, network streams, grabbing devices, etc.), specified by the
--i option, and writes to an arbitrary number of output "files", which are
-specified by a plain output filename. Anything found on the command line which
-cannot be interpreted as an option is considered to be an output filename.
-
-
Each input or output file can, in principle, contain any number of streams of
-different types (video/audio/subtitle/attachment/data). The allowed number and/or
-types of streams may be limited by the container format. Selecting which
-streams from which inputs will go into which output is either done automatically
-or with the -map
option (see the Stream selection chapter).
-
-
To refer to input files in options, you must use their indices (0-based). E.g.
-the first input file is 0
, the second is 1
, etc. Similarly, streams
-within a file are referred to by their indices. E.g. 2:3
refers to the
-fourth stream in the third input file. Also see the Stream specifiers chapter.
-
-
As a general rule, options are applied to the next specified
-file. Therefore, order is important, and you can have the same
-option on the command line multiple times. Each occurrence is
-then applied to the next input or output file.
-Exceptions from this rule are the global options (e.g. verbosity level),
-which should be specified first.
-
-
Do not mix input and output files – first specify all input files, then all
-output files. Also do not mix options which belong to different files. All
-options apply ONLY to the next input or output file and are reset between files.
-
-
- To set the video bitrate of the output file to 64 kbit/s:
-
-
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
-
-
- To force the frame rate of the output file to 24 fps:
-
-
ffmpeg -i input.avi -r 24 output.avi
-
-
- To force the frame rate of the input file (valid for raw formats only)
-to 1 fps and the frame rate of the output file to 24 fps:
-
-
ffmpeg -r 1 -i input.m2v -r 24 output.avi
-
-
-
-
The format option may be needed for raw input files.
-
-
-
-
3 Detailed description
-
-
The transcoding process in ffmpeg
for each output can be described by
-the following diagram:
-
-
-
_______ ______________
-| | | |
-| input | demuxer | encoded data | decoder
-| file | ---------> | packets | -----+
-|_______| |______________| |
- v
- _________
- | |
- | decoded |
- | frames |
- |_________|
- ________ ______________ |
-| | | | |
-| output | <-------- | encoded data | <----+
-| file | muxer | packets | encoder
-|________| |______________|
-
-
-
-
-
ffmpeg
calls the libavformat library (containing demuxers) to read
-input files and get packets containing encoded data from them. When there are
-multiple input files, ffmpeg
tries to keep them synchronized by
-tracking lowest timestamp on any active input stream.
-
-
Encoded packets are then passed to the decoder (unless streamcopy is selected
-for the stream, see further for a description). The decoder produces
-uncompressed frames (raw video/PCM audio/...) which can be processed further by
-filtering (see next section). After filtering, the frames are passed to the
-encoder, which encodes them and outputs encoded packets. Finally those are
-passed to the muxer, which writes the encoded packets to the output file.
-
-
-
3.1 Filtering# TOC
-
Before encoding, ffmpeg
can process raw audio and video frames using
-filters from the libavfilter library. Several chained filters form a filter
-graph. ffmpeg
distinguishes between two types of filtergraphs:
-simple and complex.
-
-
-
3.1.1 Simple filtergraphs# TOC
-
Simple filtergraphs are those that have exactly one input and output, both of
-the same type. In the above diagram they can be represented by simply inserting
-an additional step between decoding and encoding:
-
-
-
_________ ______________
-| | | |
-| decoded | | encoded data |
-| frames |\ _ | packets |
-|_________| \ /||______________|
- \ __________ /
- simple _\|| | / encoder
- filtergraph | filtered |/
- | frames |
- |__________|
-
-
-
-
Simple filtergraphs are configured with the per-stream -filter option
-(with -vf and -af aliases for video and audio respectively).
-A simple filtergraph for video can look for example like this:
-
-
-
_______ _____________ _______ ________
-| | | | | | | |
-| input | ---> | deinterlace | ---> | scale | ---> | output |
-|_______| |_____________| |_______| |________|
-
-
-
-
Note that some filters change frame properties but not frame contents. E.g. the
-fps
filter in the example above changes number of frames, but does not
-touch the frame contents. Another example is the setpts
filter, which
-only sets timestamps and otherwise passes the frames unchanged.
-
-
-
3.1.2 Complex filtergraphs# TOC
-
Complex filtergraphs are those which cannot be described as simply a linear
-processing chain applied to one stream. This is the case, for example, when the graph has
-more than one input and/or output, or when output stream type is different from
-input. They can be represented with the following diagram:
-
-
-
_________
-| |
-| input 0 |\ __________
-|_________| \ | |
- \ _________ /| output 0 |
- \ | | / |__________|
- _________ \| complex | /
-| | | |/
-| input 1 |---->| filter |\
-|_________| | | \ __________
- /| graph | \ | |
- / | | \| output 1 |
- _________ / |_________| |__________|
-| | /
-| input 2 |/
-|_________|
-
-
-
-
Complex filtergraphs are configured with the -filter_complex option.
-Note that this option is global, since a complex filtergraph, by its nature,
-cannot be unambiguously associated with a single stream or file.
-
-
The -lavfi option is equivalent to -filter_complex .
-
-
A trivial example of a complex filtergraph is the overlay
filter, which
-has two video inputs and one video output, containing one video overlaid on top
-of the other. Its audio counterpart is the amix
filter.
-
-
-
3.2 Stream copy# TOC
-
Stream copy is a mode selected by supplying the copy
parameter to the
--codec option. It makes ffmpeg
omit the decoding and encoding
-step for the specified stream, so it does only demuxing and muxing. It is useful
-for changing the container format or modifying container-level metadata. The
-diagram above will, in this case, simplify to this:
-
-
-
_______ ______________ ________
-| | | | | |
-| input | demuxer | encoded data | muxer | output |
-| file | ---------> | packets | -------> | file |
-|_______| |______________| |________|
-
-
-
-
Since there is no decoding or encoding, it is very fast and there is no quality
-loss. However, it might not work in some cases because of many factors. Applying
-filters is obviously also impossible, since filters work on uncompressed data.
-
-
-
-
4 Stream selection
-
-
By default, ffmpeg
includes only one stream of each type (video, audio, subtitle)
-present in the input files and adds them to each output file. It picks the
-"best" of each based upon the following criteria: for video, it is the stream
-with the highest resolution, for audio, it is the stream with the most channels, for
-subtitles, it is the first subtitle stream. In the case where several streams of
-the same type rate equally, the stream with the lowest index is chosen.
-
-
You can disable some of those defaults by using the -vn/-an/-sn
options. For
-full manual control, use the -map
option, which disables the defaults just
-described.
-
-
-
-
5 Options
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
5.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
-
5.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
5.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
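
The same AVOptions can also be set from C. A small sketch, assuming libx264 was enabled at build time; make_x264_context() is an illustrative helper and the values are arbitrary:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

AVCodecContext *make_x264_context(void)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("libx264");
    AVCodecContext *ctx;

    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);

    av_opt_set(ctx, "b", "2M", 0);                    /* generic AVCodecContext option */
    av_opt_set(ctx->priv_data, "preset", "fast", 0);  /* libx264 private option */
    return ctx;
}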
-
-
-
5.4 Main options# TOC
-
-
--f fmt (input/output )
-Force input or output file format. The format is normally auto detected for input
-files and guessed from the file extension for output files, so this option is not
-needed in most cases.
-
-
--i filename (input )
-input file name
-
-
--y (global )
-Overwrite output files without asking.
-
-
--n (global )
-Do not overwrite output files, and exit immediately if a specified
-output file already exists.
-
-
--c[:stream_specifier ] codec (input/output,per-stream )
--codec[:stream_specifier ] codec (input/output,per-stream )
-Select an encoder (when used before an output file) or a decoder (when used
-before an input file) for one or more streams. codec is the name of a
-decoder/encoder or a special value copy
(output only) to indicate that
-the stream is not to be re-encoded.
-
-For example
-
-
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
-
-encodes all video streams with libx264 and copies all audio streams.
-
-For each stream, the last matching c
option is applied, so
-
-
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
-
-will copy all the streams except the second video, which will be encoded with
-libx264, and the 138th audio, which will be encoded with libvorbis.
-
-
--t duration (input/output )
-When used as an input option (before -i
), limit the duration of
-data read from the input file.
-
-When used as an output option (before an output filename), stop writing the
-output after its duration reaches duration .
-
-duration may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--to position (output )
-Stop writing the output at position .
-position may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--fs limit_size (output )
-Set the file size limit, expressed in bytes.
-
-
--ss position (input/output )
-When used as an input option (before -i
), seeks in this input file to
-position . Note that in most formats it is not possible to seek exactly, so
-ffmpeg
will seek to the closest seek point before position .
-When transcoding and -accurate_seek is enabled (the default), this
-extra segment between the seek point and position will be decoded and
-discarded. When doing stream copy or when -noaccurate_seek is used, it
-will be preserved.
-
-When used as an output option (before an output filename), decodes but discards
-input until the timestamps reach position .
-
-position may be either in seconds or in hh:mm:ss[.xxx]
form.
-
-
--itsoffset offset (input )
-Set the input time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added to the timestamps of the input files. Specifying
-a positive offset means that the corresponding streams are delayed by
-the time duration specified in offset .
-
-
--timestamp date (output )
-Set the recording timestamp in the container.
-
-date must be a time duration specification,
-see (ffmpeg-utils)the Date section in the ffmpeg-utils(1) manual .
-
-
--metadata[:metadata_specifier] key =value (output,per-metadata )
-Set a metadata key/value pair.
-
-An optional metadata_specifier may be given to set metadata
-on streams or chapters. See -map_metadata
documentation for
-details.
-
-This option overrides metadata set with -map_metadata
. It is
-also possible to delete metadata by using an empty value.
-
-For example, for setting the title in the output file:
-
-
ffmpeg -i in.avi -metadata title="my title" out.flv
-
-
-To set the language of the first audio stream:
-
-
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
-
-
-
--target type (output )
-Specify target file type (vcd
, svcd
, dvd
, dv
,
-dv50
). type may be prefixed with pal-
, ntsc-
or
-film-
to use the corresponding standard. All the format options
-(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
-
-
-
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
-
-
-Nevertheless you can specify additional options as long as you know
-they do not conflict with the standard, as in:
-
-
-
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
-
-
-
--dframes number (output )
-Set the number of data frames to output. This is an alias for -frames:d
.
-
-
--frames[:stream_specifier ] framecount (output,per-stream )
-Stop writing to the stream after framecount frames.
-
-
--q[:stream_specifier ] q (output,per-stream )
--qscale[:stream_specifier ] q (output,per-stream )
-Use fixed quality scale (VBR). The meaning of q /qscale is
-codec-dependent.
-If qscale is used without a stream_specifier then it applies only
-to the video stream, this is to maintain compatibility with previous behavior
-and as specifying the same codec specific value to 2 different codecs that is
-audio and video generally is not what is intended when no stream_specifier is
-used.
-
-
--filter[:stream_specifier ] filtergraph (output,per-stream )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single input and a single output of the
-same type of the stream. In the filtergraph, the input is associated
-to the label in
, and the output to the label out
. See
-the ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-See the -filter_complex option if you
-want to create filtergraphs with multiple inputs and/or outputs.
-
-
--filter_script[:stream_specifier ] filename (output,per-stream )
-This option is similar to -filter , the only difference is that its
-argument is the name of the file from which a filtergraph description is to be
-read.
-
-
--pre[:stream_specifier ] preset_name (output,per-stream )
-Specify the preset for matching stream(s).
-
-
--stats (global )
-Print encoding progress/statistics. It is on by default, to explicitly
-disable it you need to specify -nostats
.
-
-
--progress url (global )
-Send program-friendly progress information to url .
-
-Progress information is written approximately every second and at the end of
-the encoding process. It is made of "key =value " lines. key
-consists of only alphanumeric characters. The last key of a sequence of
-progress information is always "progress".
-
-
--stdin
-Enable interaction on standard input. On by default unless standard input is
-used as an input. To explicitly disable interaction you need to specify
--nostdin.
-
-Disabling interaction on standard input is useful, for example, if
-ffmpeg is in the background process group. Roughly the same result can
-be achieved with ffmpeg ... < /dev/null but it requires a
-shell.
-
-
--debug_ts (global )
-Print timestamp information. It is off by default. This option is
-mostly useful for testing and debugging purposes, and the output
-format may change from one version to another, so it should not be
-employed by portable scripts.
-
-See also the option -fdebug ts.
-
-
--attach filename (output )
-Add an attachment to the output file. This is supported by a few formats
-like Matroska for e.g. fonts used in rendering subtitles. Attachments
-are implemented as a specific type of stream, so this option will add
-a new stream to the file. It is then possible to use per-stream options
-on this stream in the usual way. Attachment streams created with this
-option will be created after all the other streams (i.e. those created
-with -map
or automatic mappings).
-
-Note that for Matroska you also have to set the mimetype metadata tag:
-
-
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
-
-(assuming that the attachment stream will be third in the output file).
-
-
--dump_attachment[:stream_specifier ] filename (input,per-stream )
-Extract the matching attachment stream into a file named filename . If
-filename is empty, then the value of the filename metadata tag
-will be used.
-
-E.g. to extract the first attachment to a file named ’out.ttf’:
-
-
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
-
-To extract all attachments to files determined by the filename tag:
-
-
ffmpeg -dump_attachment:t "" -i INPUT
-
-
-Technical note – attachments are implemented as codec extradata, so this
-option can actually be used to extract extradata from any stream, not just
-attachments.
-
-
-
-
-
-
5.5 Video Options# TOC
-
-
--vframes number (output )
-Set the number of video frames to output. This is an alias for -frames:v.
-
--r[:stream_specifier ] fps (input/output,per-stream )
-Set frame rate (Hz value, fraction or abbreviation).
-
-As an input option, ignore any timestamps stored in the file and instead
-generate timestamps assuming constant frame rate fps .
-This is not the same as the -framerate option used for some input formats
-like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
-If in doubt use -framerate instead of the input option -r .
-
-As an output option, duplicate or drop input frames to achieve constant output
-frame rate fps .
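-
-For example, to convert the output to a constant 25 frames per second
-(illustrative filenames):
-
ffmpeg -i input.mp4 -r 25 output.mp4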
-
-
--s[:stream_specifier ] size (input/output,per-stream )
-Set frame size.
-
-As an input option, this is a shortcut for the video_size private
-option, recognized by some demuxers for which the frame size is either not
-stored in the file or is configurable – e.g. raw video or video grabbers.
-
-As an output option, this inserts the scale video filter at the
-end of the corresponding filtergraph. Please use the scale filter
-directly to insert it at the beginning or some other place.
-
-The format is ‘wxh ’ (default - same as source).
-
-
--aspect[:stream_specifier ] aspect (output,per-stream )
-Set the video display aspect ratio specified by aspect .
-
-aspect can be a floating point number string, or a string of the
-form num :den , where num and den are the
-numerator and denominator of the aspect ratio. For example "4:3",
-"16:9", "1.3333", and "1.7777" are valid argument values.
-
-If used together with -vcodec copy , it will affect the aspect ratio
-stored at container level, but not the aspect ratio stored in encoded
-frames, if it exists.
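-
-For example, to set a 16:9 display aspect ratio at container level while
-stream copying (illustrative filenames):
-
ffmpeg -i input.mp4 -aspect 16:9 -c copy output.mp4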
-
-
--vn (output )
-Disable video recording.
-
-
--vcodec codec (output )
-Set the video codec. This is an alias for -codec:v.
-
-
--pass[:stream_specifier ] n (output,per-stream )
-Select the pass number (1 or 2). It is used to do two-pass
-video encoding. The statistics of the video are recorded in the first
-pass into a log file (see also the option -passlogfile),
-and in the second pass that log file is used to generate the video
-at the exact requested bitrate.
-On pass 1, you may just deactivate audio and set output to null,
-examples for Windows and Unix:
-
-
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
-ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
-
-
-
--passlogfile[:stream_specifier ] prefix (output,per-stream )
-Set two-pass log file name prefix to prefix , the default file name
-prefix is “ffmpeg2pass”. The complete file name will be
-PREFIX-N.log , where N is a number specific to the output
-stream
-
-
--vf filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:v, see the -filter option.
-
-
-
-
-
5.6 Advanced Video options# TOC
-
-
--pix_fmt[:stream_specifier ] format (input/output,per-stream )
-Set pixel format. Use -pix_fmts to show all the supported
-pixel formats.
-If the selected pixel format can not be selected, ffmpeg will print a
-warning and select the best pixel format supported by the encoder.
-If pix_fmt is prefixed by a +, ffmpeg will exit with an error
-if the requested pixel format can not be selected, and automatic conversions
-inside filtergraphs are disabled.
-If pix_fmt is a single +, ffmpeg selects the same pixel format
-as the input (or graph output) and automatic conversions are disabled.
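-
-For example, to request a widely supported pixel format for the output
-(illustrative filenames):
-
ffmpeg -i input.mov -pix_fmt yuv420p output.mp4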
-
-
--sws_flags flags (input/output )
-Set SwScaler flags.
-
--vdt n
-Discard threshold.
-
-
--rc_override[:stream_specifier ] override (output,per-stream )
-Rate control override for specific intervals, formatted as "int,int,int"
-list separated with slashes. Two first values are the beginning and
-end frame numbers, last one is quantizer to use if positive, or quality
-factor if negative.
-
-
--ilme
-Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
-Use this option if your input file is interlaced and you want
-to keep the interlaced format for minimum losses.
-The alternative is to deinterlace the input stream with
--deinterlace , but deinterlacing introduces losses.
-
--psnr
-Calculate PSNR of compressed frames.
-
--vstats
-Dump video coding statistics to vstats_HHMMSS.log .
-
--vstats_file file
-Dump video coding statistics to file .
-
--top[:stream_specifier ] n (output,per-stream )
-Select the field order: 1 for top field first, 0 for bottom field first, -1 for automatic.
-
--dc precision
-Set intra DC precision.
-
--vtag fourcc/tag (output )
-Force video tag/fourcc. This is an alias for -tag:v.
-
--qphist (global )
-Show QP histogram.
-
--vbsf bitstream_filter
-Deprecated, see -bsf.
-
-
--force_key_frames[:stream_specifier ] time [,time ...] (output,per-stream )
--force_key_frames[:stream_specifier ] expr:expr (output,per-stream )
-Force key frames at the specified timestamps, more precisely at the first
-frames after each specified time.
-
-If the argument is prefixed with expr:, the string expr
-is interpreted as an expression and is evaluated for each frame. A
-key frame is forced in case the evaluation is non-zero.
-
-If one of the times is "chapters[delta ]", it is expanded into
-the time of the beginning of all chapters in the file, shifted by
-delta , expressed as a time in seconds.
-This option can be useful to ensure that a seek point is present at a
-chapter mark or any other designated place in the output file.
-
-For example, to insert a key frame at 5 minutes, plus key frames 0.1 second
-before the beginning of every chapter:
-
-
-force_key_frames 0:05:00,chapters-0.1
-
-
-The expression in expr can contain the following constants:
-
-n
-the number of current processed frame, starting from 0
-
-n_forced
-the number of forced frames
-
-prev_forced_n
-the number of the previous forced frame; it is NAN when no
-keyframe has been forced yet
-
-prev_forced_t
-the time of the previous forced frame; it is NAN when no
-keyframe has been forced yet
-
-t
-the time of the current processed frame
-
-
-
-For example to force a key frame every 5 seconds, you can specify:
-
-
-force_key_frames expr:gte(t,n_forced*5)
-
-
-To force a key frame 5 seconds after the time of the last forced one,
-starting from second 13:
-
-
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
-
-
-Note that forcing too many keyframes is very harmful for the lookahead
-algorithms of certain encoders: using fixed-GOP options or similar
-would be more efficient.
-
-
--copyinkf[:stream_specifier ] (output,per-stream )
-When doing stream copy, copy also non-key frames found at the
-beginning.
-
-
--hwaccel[:stream_specifier ] hwaccel (input,per-stream )
-Use hardware acceleration to decode the matching stream(s). The allowed values
-of hwaccel are:
-
-none
-Do not use any hardware acceleration (the default).
-
-
-auto
-Automatically select the hardware acceleration method.
-
-
-vda
-Use Apple VDA hardware acceleration.
-
-
-vdpau
-Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
-
-
-dxva2
-Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
-
-
-
-This option has no effect if the selected hwaccel is not available or not
-supported by the chosen decoder.
-
-Note that most acceleration methods are intended for playback and will not be
-faster than software decoding on modern CPUs. Additionally, ffmpeg
-will usually need to copy the decoded frames from the GPU memory into the system
-memory, resulting in further performance loss. This option is thus mainly
-useful for testing.
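-
-For example, to test hardware-accelerated decoding without writing an
-output file, assuming an acceleration method is available on the system:
-
ffmpeg -hwaccel auto -i input.mkv -f null -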
-
-
--hwaccel_device[:stream_specifier ] hwaccel_device (input,per-stream )
-Select a device to use for hardware acceleration.
-
-This option only makes sense when the -hwaccel option is also
-specified. Its exact meaning depends on the specific hardware acceleration
-method chosen.
-
-
-vdpau
-For VDPAU, this option specifies the X11 display/screen to use. If this option
-is not specified, the value of the DISPLAY environment variable is used
-
-
-dxva2
-For DXVA2, this option should contain the number of the display adapter to use.
-If this option is not specified, the default adapter is used.
-
-
-
-
-
-
-
5.7 Audio Options# TOC
-
-
--aframes number (output )
-Set the number of audio frames to output. This is an alias for -frames:a.
-
--ar[:stream_specifier ] freq (input/output,per-stream )
-Set the audio sampling frequency. For output streams it is set by
-default to the frequency of the corresponding input stream. For input
-streams this option only makes sense for audio grabbing devices and raw
-demuxers and is mapped to the corresponding demuxer options.
-
--aq q (output )
-Set the audio quality (codec-specific, VBR). This is an alias for -q:a.
-
--ac[:stream_specifier ] channels (input/output,per-stream )
-Set the number of audio channels. For output streams it is set by
-default to the number of input audio channels. For input streams
-this option only makes sense for audio grabbing devices and raw demuxers
-and is mapped to the corresponding demuxer options.
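-
-For example, to downmix to mono and resample to 22050 Hz (illustrative
-filenames):
-
ffmpeg -i input.wav -ar 22050 -ac 1 mono.wav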
-
--an (output )
-Disable audio recording.
-
--acodec codec (input/output )
-Set the audio codec. This is an alias for -codec:a.
-
--sample_fmt[:stream_specifier ] sample_fmt (output,per-stream )
-Set the audio sample format. Use -sample_fmts to get a list
-of supported sample formats.
-
-
--af filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:a, see the -filter option.
-
-
-
-
-
5.8 Advanced Audio options# TOC
-
-
--atag fourcc/tag (output )
-Force audio tag/fourcc. This is an alias for -tag:a.
-
--absf bitstream_filter
-Deprecated, see -bsf.
-
--guess_layout_max channels (input,per-stream )
-If some input channel layout is not known, try to guess only if it
-corresponds to at most the specified number of channels. For example, 2
-tells ffmpeg to recognize 1 channel as mono and 2 channels as
-stereo but not 6 channels as 5.1. The default is to always try to guess. Use
-0 to disable all guessing.
-
-
-
-
-
5.9 Subtitle options# TOC
-
-
--scodec codec (input/output )
-Set the subtitle codec. This is an alias for -codec:s.
-
--sn (output )
-Disable subtitle recording.
-
--sbsf bitstream_filter
-Deprecated, see -bsf.
-
-
-
-
-
5.10 Advanced Subtitle options# TOC
-
-
--fix_sub_duration
-Fix subtitles durations. For each subtitle, wait for the next packet in the
-same stream and adjust the duration of the first to avoid overlap. This is
-necessary with some subtitles codecs, especially DVB subtitles, because the
-duration in the original packet is only a rough estimate and the end is
-actually marked by an empty subtitle frame. Failing to use this option when
-necessary can result in exaggerated durations or muxing failures due to
-non-monotonic timestamps.
-
-Note that this option will delay the output of all data until the next
-subtitle packet is decoded: it may increase memory consumption and latency a
-lot.
-
-
--canvas_size size
-Set the size of the canvas used to render subtitles.
-
-
-
-
-
-
5.11 Advanced options# TOC
-
-
--map [-]input_file_id [:stream_specifier ][,sync_file_id [:stream_specifier ]] | [linklabel] (output )
-
-Designate one or more input streams as a source for the output file. Each input
-stream is identified by the input file index input_file_id and
-the input stream index input_stream_id within the input
-file. Both indices start at 0. If specified,
-sync_file_id :stream_specifier sets which input stream
-is used as a presentation sync reference.
-
-The first -map option on the command line specifies the
-source for output stream 0, the second -map option specifies
-the source for output stream 1, etc.
-
-A - character before the stream identifier creates a "negative" mapping.
-It disables matching streams from already created mappings.
-
-An alternative [linklabel] form will map outputs from complex filter
-graphs (see the -filter_complex option) to the output file.
-linklabel must correspond to a defined output link label in the graph.
-
-For example, to map ALL streams from the first input file to output
-
-
ffmpeg -i INPUT -map 0 output
-
-
-For example, if you have two audio streams in the first input file,
-these streams are identified by "0:0" and "0:1". You can use
--map to select which streams to place in an output file. For
-example:
-
-
ffmpeg -i INPUT -map 0:1 out.wav
-
-will map the input stream in INPUT identified by "0:1" to
-the (single) output stream in out.wav .
-
-For example, to select the stream with index 2 from input file
-a.mov (specified by the identifier "0:2"), and stream with
-index 6 from input b.mov (specified by the identifier "1:6"),
-and copy them to the output file out.mov :
-
-
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
-
-
-To select all video and the third audio stream from an input file:
-
-
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
-
-
-To map all the streams except the second audio, use negative mappings
-
-
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
-
-
-To pick the English audio stream:
-
-
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
-
-
-Note that using this option disables the default mappings for this output file.
-
-
--map_channel [input_file_id .stream_specifier .channel_id |-1][:output_file_id .stream_specifier ]
-Map an audio channel from a given input to an output. If
-output_file_id .stream_specifier is not set, the audio channel will
-be mapped on all the audio streams.
-
-Using "-1" instead of
-input_file_id .stream_specifier .channel_id will map a muted
-channel.
-
-For example, assuming INPUT is a stereo audio file, you can switch the
-two audio channels with the following command:
-
-
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
-
-
-If you want to mute the first channel and keep the second:
-
-
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
-
-
-The order of the "-map_channel" option specifies the order of the channels in
-the output stream. The output channel layout is guessed from the number of
-channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
-in combination with "-map_channel" causes the channel gain levels to be updated if
-input and output channel layouts don’t match (for instance two "-map_channel"
-options and "-ac 6").
-
-You can also extract each channel of an input to specific outputs; the following
-command extracts two channels of the INPUT audio stream (file 0, stream 0)
-to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs:
-
-
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
-
-
-The following example splits the channels of a stereo input into two separate
-streams, which are put into the same output file:
-
-
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
-
-
-Note that currently each output stream can only contain channels from a single
-input stream; you can’t for example use "-map_channel" to pick multiple input
-audio channels contained in different streams (from the same or different files)
-and merge them into a single output stream. It is therefore not currently
-possible, for example, to turn two separate mono streams into a single stereo
-stream. However splitting a stereo stream into two single channel mono streams
-is possible.
-
-If you need this feature, a possible workaround is to use the amerge
-filter. For example, if you need to merge a media (here input.mkv ) with 2
-mono audio streams into one single stereo channel audio stream (and keep the
-video stream), you can use the following command:
-
-
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
-
-
-
--map_metadata[:metadata_spec_out ] infile [:metadata_spec_in ] (output,per-metadata )
-Set metadata information of the next output file from infile . Note that
-those are file indices (zero-based), not filenames.
-Optional metadata_spec_in/out parameters specify which metadata to copy.
-A metadata specifier can have the following forms:
-
-g
-global metadata, i.e. metadata that applies to the whole file
-
-
-s [:stream_spec ]
-per-stream metadata. stream_spec is a stream specifier as described
-in the Stream specifiers chapter. In an input metadata specifier, the first
-matching stream is copied from. In an output metadata specifier, all matching
-streams are copied to.
-
-
-c :chapter_index
-per-chapter metadata. chapter_index is the zero-based chapter index.
-
-
-p :program_index
-per-program metadata. program_index is the zero-based program index.
-
-
-If metadata specifier is omitted, it defaults to global.
-
-By default, global metadata is copied from the first input file,
-per-stream and per-chapter metadata is copied along with streams/chapters. These
-default mappings are disabled by creating any mapping of the relevant type. A negative
-file index can be used to create a dummy mapping that just disables automatic copying.
-
-For example to copy metadata from the first stream of the input file to global metadata
-of the output file:
-
-
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
-
-
-To do the reverse, i.e. copy global metadata to all audio streams:
-
-
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
-
-Note that simple 0 would work as well in this example, since global
-metadata is assumed by default.
-
-
--map_chapters input_file_index (output )
-Copy chapters from input file with index input_file_index to the next
-output file. If no chapter mapping is specified, then chapters are copied from
-the first input file with at least one chapter. Use a negative file index to
-disable any chapter copying.
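-
-For example, to take the chapters from a second input while copying all
-streams from the first (illustrative filenames):
-
ffmpeg -i main.mkv -i chapters.mkv -map 0 -c copy -map_chapters 1 output.mkv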
-
-
--benchmark (global )
-Show benchmarking information at the end of an encode.
-Shows CPU time used and maximum memory consumption.
-Maximum memory consumption is not supported on all systems,
-it will usually display as 0 if not supported.
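-
-For example, to benchmark decoding only, discarding the output:
-
ffmpeg -benchmark -i input.mp4 -f null -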
-
--benchmark_all (global )
-Show benchmarking information during the encode.
-Shows CPU time used in various steps (audio/video encode/decode).
-
--timelimit duration (global )
-Exit after ffmpeg has been running for duration seconds.
-
--dump (global )
-Dump each input packet to stderr.
-
--hex (global )
-When dumping packets, also dump the payload.
-
--re (input )
-Read input at native frame rate. Mainly used to simulate a grab device
-or live input stream (e.g. when reading from a file). Should not be used
-with actual grab devices or live input streams (where it can cause packet
-loss).
-By default ffmpeg attempts to read the input(s) as fast as possible.
-This option will slow down the reading of the input(s) to the native frame rate
-of the input(s). It is useful for real-time output (e.g. live streaming).
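-
-For example, to send a file in real time over UDP while copying the streams
-(the address is only an illustration):
-
ffmpeg -re -i input.mp4 -c copy -f mpegts udp://127.0.0.1:1234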
-
--loop_input
-Loop over the input stream. Currently it works only for image
-streams. This option is used for automatic FFserver testing.
-This option is deprecated, use -loop 1.
-
--loop_output number_of_times
-Repeatedly loop output for formats that support looping such as animated GIF
-(0 will loop the output infinitely).
-This option is deprecated, use -loop.
-
--vsync parameter
-Video sync method.
-For compatibility reasons old values can be specified as numbers.
-Newly added values will have to be specified as strings always.
-
-
-0, passthrough
-Each frame is passed with its timestamp from the demuxer to the muxer.
-
-1, cfr
-Frames will be duplicated and dropped to achieve exactly the requested
-constant frame rate.
-
-2, vfr
-Frames are passed through with their timestamp or dropped so as to
-prevent 2 frames from having the same timestamp.
-
-drop
-As passthrough but destroys all timestamps, making the muxer generate
-fresh timestamps based on frame-rate.
-
--1, auto
-Chooses between 1 and 2 depending on muxer capabilities. This is the
-default method.
-
-
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-With -map you can select from which stream the timestamps should be
-taken. You can leave either video or audio unchanged and sync the
-remaining stream(s) to the unchanged one.
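-
-For example, to force constant-frame-rate output at 30 fps (illustrative
-filenames):
-
ffmpeg -i input.mkv -vsync cfr -r 30 output.mp4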
-
-
--async samples_per_second
-Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
-the parameter is the maximum samples per second by which the audio is changed.
--async 1 is a special case where only the start of the audio stream is corrected
-without any later correction.
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-This option has been deprecated. Use the aresample audio filter instead.
-
-
--copyts
-Do not process input timestamps, but keep their values without trying
-to sanitize them. In particular, do not remove the initial start time
-offset value.
-
-Note that, depending on the vsync option or on specific muxer
-processing (e.g. in case the format option avoid_negative_ts
-is enabled) the output timestamps may mismatch with the input
-timestamps even when this option is selected.
-
-
--start_at_zero
-When used with copyts , shift input timestamps so they start at zero.
-
-This means that using e.g. -ss 50 will make output timestamps start at
-50 seconds, regardless of what timestamp the input file started at.
-
-
--copytb mode
-Specify how to set the encoder timebase when stream copying. mode is an
-integer numeric value, and can assume one of the following values:
-
-
-1
-Use the demuxer timebase.
-
-The time base is copied to the output encoder from the corresponding input
-demuxer. This is sometimes required to avoid non monotonically increasing
-timestamps when copying video streams with variable frame rate.
-
-
-0
-Use the decoder timebase.
-
-The time base is copied to the output encoder from the corresponding input
-decoder.
-
-
--1
-Try to make the choice automatically, in order to generate a sane output.
-
-
-
-Default value is -1.
-
-
--shortest (output )
-Finish encoding when the shortest input stream ends.
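-
-For example, to mux a video with a separate audio track and stop when the
-shorter of the two ends (illustrative filenames):
-
ffmpeg -i video.mp4 -i audio.wav -map 0:v -map 1:a -shortest output.mp4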
-
--dts_delta_threshold
-Timestamp discontinuity delta threshold.
-
--muxdelay seconds (input )
-Set the maximum demux-decode delay.
-
--muxpreload seconds (input )
-Set the initial demux-decode delay.
-
--streamid output-stream-index :new-value (output )
-Assign a new stream-id value to an output stream. This option should be
-specified prior to the output filename to which it applies.
-For the situation where multiple output files exist, a streamid
-may be reassigned to a different value.
-
-For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
-an output mpegts file:
-
-
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
-
-
-
--bsf[:stream_specifier ] bitstream_filters (output,per-stream )
-Set bitstream filters for matching streams. bitstream_filters is
-a comma-separated list of bitstream filters. Use the -bsfs option
-to get the list of bitstream filters.
-
-
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
-
-
-
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
-
-
-
--tag[:stream_specifier ] codec_tag (input/output,per-stream )
-Force a tag/fourcc for matching streams.
-
-
--timecode hh :mm :ss SEPff
-Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’
-(or ’.’) for drop.
-
-
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
-
-
-
--filter_complex filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. For simple graphs – those with one input and one output of the same
-type – see the -filter options. filtergraph is a description of
-the filtergraph, as described in the “Filtergraph syntax” section of the
-ffmpeg-filters manual.
-
-Input link labels must refer to input streams using the
-[file_index:stream_specifier] syntax (i.e. the same as -map
-uses). If stream_specifier matches multiple streams, the first one will be
-used. An unlabeled input will be connected to the first unused input stream of
-the matching type.
-
-Output link labels are referred to with -map . Unlabeled outputs are
-added to the first output file.
-
-Note that with this option it is possible to use only lavfi sources without
-normal input files.
-
-For example, to overlay an image over video
-
-
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
-'[out]' out.mkv
-
-Here [0:v] refers to the first video stream in the first input file,
-which is linked to the first (main) input of the overlay filter. Similarly the
-first video stream in the second input is linked to the second (overlay) input
-of overlay.
-
-Assuming there is only one video stream in each input file, we can omit input
-labels, so the above is equivalent to
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
-'[out]' out.mkv
-
-
-Furthermore we can omit the output label and the single output from the filter
-graph will be added to the output file automatically, so we can simply write
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
-
-
-To generate 5 seconds of pure red video using lavfi color source:
-
-
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
-
-
-
--lavfi filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. Equivalent to -filter_complex .
-
-
--filter_complex_script filename (global )
-This option is similar to -filter_complex , the only difference is that
-its argument is the name of the file from which a complex filtergraph
-description is to be read.
-
-
--accurate_seek (input )
-This option enables or disables accurate seeking in input files with the
--ss option. It is enabled by default, so seeking is accurate when
-transcoding. Use -noaccurate_seek to disable it, which may be useful
-e.g. when copying some streams and transcoding the others.
-
-
--override_ffserver (global )
-Overrides the input specifications from ffserver. Using this
-option you can map any input stream to ffserver and control
-many aspects of the encoding from ffmpeg. Without this
-option ffmpeg will transmit to ffserver what is
-requested by ffserver.
-
-The option is intended for cases where features are needed that cannot be
-specified to ffserver but can be to ffmpeg.
-
-
--sdp_file file (global )
-Print sdp information to file .
-This allows dumping sdp information when at least one output isn’t an
-rtp stream.
-
-
--discard (input )
-Allows discarding specific streams or frames of streams at the demuxer.
-Not all demuxers support this.
-
-
-none
-Discard no frame.
-
-
-default
-Default, which discards no frames.
-
-
-noref
-Discard all non-reference frames.
-
-
-bidir
-Discard all bidirectional frames.
-
-
-nokey
-Discard all frames excepts keyframes.
-
-
-all
-Discard all frames.
-
-
-
-
-
-
-
As a special exception, you can use a bitmap subtitle stream as input: it
-will be converted into a video with the same size as the largest video in
-the file, or 720x576 if no video is present. Note that this is an
-experimental and temporary solution. It will be removed once libavfilter has
-proper support for subtitles.
-
-
For example, to hardcode subtitles on top of a DVB-T recording stored in
-MPEG-TS format, delaying the subtitles by 1 second:
-
-
ffmpeg -i input.ts -filter_complex \
- '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
- -sn -map '#0x2dc' output.mkv
-
-
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
-audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
-
-
-
5.12 Preset files# TOC
-
A preset file contains a sequence of option =value pairs,
-one for each line, specifying a sequence of options which would be
-awkward to specify on the command line. Lines starting with the hash
-(’#’) character are ignored and are used to provide comments. Check
-the presets directory in the FFmpeg source tree for examples.
-
-
There are two types of preset files: ffpreset and avpreset files.
-
-
-
5.12.1 ffpreset files# TOC
-
ffpreset files are specified with the vpre, apre,
-spre, and fpre options. The fpre option takes the
-filename of the preset instead of a preset name as input and can be
-used for any kind of codec. For the vpre, apre, and
-spre options, the options specified in a preset file are
-applied to the currently selected codec of the same type as the preset
-option.
-
-
The argument passed to the vpre, apre, and spre
-preset options identifies the preset file to use according to the
-following rules:
-
-
First ffmpeg searches for a file named arg .ffpreset in the
-directories $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg , and in
-the datadir defined at configuration time (usually PREFIX/share/ffmpeg )
-or in a ffpresets folder along the executable on win32,
-in that order. For example, if the argument is libvpx-1080p, it will
-search for the file libvpx-1080p.ffpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-codec_name -arg .ffpreset in the above-mentioned
-directories, where codec_name is the name of the codec to which
-the preset file options will be applied. For example, if you select
-the video codec with -vcodec libvpx and use -vpre 1080p,
-then it will search for the file libvpx-1080p.ffpreset .
-
-
-
5.12.2 avpreset files# TOC
-
avpreset files are specified with the pre option. They work similarly to
-ffpreset files, but they only allow encoder-specific options. Therefore, an
-option =value pair specifying an encoder cannot be used.
-
-
When the pre option is specified, ffmpeg will look for files with the
-suffix .avpreset in the directories $AVCONV_DATADIR (if set), and
-$HOME/.avconv , and in the datadir defined at configuration time (usually
-PREFIX/share/ffmpeg ), in that order.
-
-
First ffmpeg searches for a file named codec_name -arg .avpreset in
-the above-mentioned directories, where codec_name is the name of the codec
-to which the preset file options will be applied. For example, if you select the
-video codec with -vcodec libvpx and use -pre 1080p, then it will
-search for the file libvpx-1080p.avpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-arg .avpreset in the same directories.
-
-
-
-
-
-
- For streaming at very low bitrates, use a low frame rate
-and a small GOP size. This is especially true for RealVideo where
-the Linux player does not seem to be very fast, so it can miss
-frames. An example is:
-
-
-
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
-
-
- The parameter ’q’ which is displayed while encoding is the current
-quantizer. The value 1 indicates that a very good quality could
-be achieved. The value 31 indicates the worst quality. If q=31 appears
-too often, it means that the encoder cannot compress enough to meet
-your bitrate. You must either increase the bitrate, decrease the
-frame rate or decrease the frame size.
-
- If your computer is not fast enough, you can speed up the
-compression at the expense of the compression ratio. You can use
-’-me zero’ to speed up motion estimation, and ’-g 0’ to disable
-motion estimation completely (you have only I-frames, which means it
-is about as good as JPEG compression).
-
- To have very low audio bitrates, reduce the sampling frequency
-(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
-
- To have a constant quality (but a variable bitrate), use the option
-’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst
-quality).
-
-
-
-
-
7 Examples# TOC
-
-
-
7.1 Video and Audio grabbing# TOC
-
-
If you specify the input format and device then ffmpeg can grab video
-and audio directly.
-
-
-
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Or with an ALSA audio source (mono input, card id 1) instead of OSS:
-
-
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Note that you must activate the right video source and channel before
-launching ffmpeg with any TV viewer such as
-xawtv by Gerd Knorr. You also
-have to set the audio recording levels correctly with a
-standard mixer.
-
-
-
7.2 X11 grabbing# TOC
-
-
Grab the X11 display with ffmpeg via
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as
-the DISPLAY environment variable.
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
-variable. 10 is the x-offset and 20 the y-offset for the grabbing.
-
-
-
7.3 Video and Audio file format conversion# TOC
-
-
Any supported file format and protocol can serve as input to ffmpeg:
-
-
Examples:
-
- You can use YUV files as input:
-
-
-
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
-
-
-It will use the files:
-
-
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
-/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
-
-
-The Y files use twice the resolution of the U and V files. They are
-raw files, without header. They can be generated by all decent video
-decoders. You must specify the size of the image with the -s option
-if ffmpeg cannot guess it.
-
- You can input from a raw YUV420P file:
-
-
-
ffmpeg -i /tmp/test.yuv /tmp/out.avi
-
-
-test.yuv is a file containing raw YUV planar data. Each frame is composed
-of the Y plane followed by the U and V planes at half vertical and
-horizontal resolution.
-
- You can output to a raw YUV420P file:
-
-
-
ffmpeg -i mydivx.avi hugefile.yuv
-
-
- You can set several input files and output files:
-
-
-
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
-
-
-Converts the audio file a.wav and the raw YUV video file a.yuv
-to MPEG file a.mpg.
-
- You can also do audio and video conversions at the same time:
-
-
-
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
-
-
-Converts a.wav to MPEG audio at 22050 Hz sample rate.
-
- You can encode to several formats at the same time and define a
-mapping from input stream to output streams:
-
-
-
ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
-
-
-Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map
-file:index’ specifies which input stream is used for each output
-stream, in the order of the definition of output streams.
-
- You can transcode decrypted VOBs:
-
-
-
ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
-
-
-This is a typical DVD ripping example; the input is a VOB file, the
-output an AVI file with MPEG-4 video and MP3 audio. Note that in this
-command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
-GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
-input video. Furthermore, the audio stream is MP3-encoded so you need
-to enable LAME support by passing --enable-libmp3lame
to configure.
-The mapping is particularly useful for DVD transcoding
-to get the desired audio language.
-
-NOTE: To see the supported input formats, use ffmpeg -formats.
-
- You can extract images from a video, or create a video from many images:
-
-For extracting images from a video:
-
-
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
-
-
-This will extract one video frame per second from the video and will
-output them in files named foo-001.jpeg , foo-002.jpeg ,
-etc. Images will be rescaled to fit the new WxH values.
-
-If you want to extract just a limited number of frames, you can use the
-above command in combination with the -vframes or -t option, or in
-combination with -ss to start extracting from a certain point in time.
-
-For creating a video from many images:
-
-
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
-
-
-The syntax foo-%03d.jpeg
specifies to use a decimal number
-composed of three digits padded with zeroes to express the sequence
-number. It is the same syntax supported by the C printf function, but
-only formats accepting a normal integer are suitable.
-
-When importing an image sequence, -i also supports expanding
-shell-like wildcard patterns (globbing) internally, by selecting the
-image2-specific -pattern_type glob
option.
-
-For example, for creating a video from filenames matching the glob pattern
-foo-*.jpeg
:
-
-
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
-
-
- You can put many streams of the same type in the output:
-
-
-
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
-
-
-The resulting output file test12.nut will contain the first four streams
-from the input files in reverse order.
-
- To force CBR video output:
-
-
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
-
-
- The four options lmin, lmax, mblmin and mblmax use ’lambda’ units,
-but you may use the QP2LAMBDA constant to easily convert from ’q’ units:
-
-
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
-
-
-
-
-
-
8 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
8.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- ' and \ are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between '' are included literally in the
-parsed string. The quote character ' itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
8.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour containing the ' special
-character:
-
Crime d\'Amour
-
- The string above contains a quote, so the ' needs to be escaped
-when quoting it:
-
'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
8.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
8.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
[-][HH:]MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
[-]S+[.m...]
-
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
8.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
-
-
8.4 Video size# TOC
-
Specify the size of the sourced video, it may be a string of the form
-width xheight , or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
-
-
8.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
-
-
-
8.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
8.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed by "0x" followed by an
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
-
-
8.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1 ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_* macros in libavutil/channel_layout.h ).
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
-
-
9 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +, -, *, /, ^.
-
-
The following unary operators are available: +, -.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and lesser than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Allow to load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is lesser than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is lesser than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated y , it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Allow to store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note, Variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
-For example the construct:
-
-if (A AND B) then C
-
-is equivalent to:
-
-if(A*B, C)
-
-
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
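-For example (a sketch using a hypothetical input.mp4), the following two
-commands request the same 1500000 bit/s video bitrate by means of the
-number postfixes described above:
-
ffmpeg -i input.mp4 -b:v 1.5M output.mp4
ffmpeg -i input.mp4 -b:v 1500k output.mp4
-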
-
-
-
-
10 OpenCL Options# TOC
-
-
-When FFmpeg is configured with --enable-opencl , it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench or
-av_opencl_get_device_list() .
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench or
-av_opencl_get_device_list() .
-
-
-
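-As a sketch (assuming a build configured with --enable-opencl and that
-the ffmpeg tool exposes the global -opencl_options flag), the devices can
-be benchmarked and then selected like this:
-
ffmpeg -opencl_bench
ffmpeg -opencl_options platform_idx=0:device_idx=0 -i input.mp4 output.mp4
-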
-
-
-
11 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext options or using the libavutil/opt.h API
-for programmatic use.
-
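-For instance (an illustrative sketch with placeholder file names), the
-generic b and g options listed below can be set per stream on the
-command line:
-
ffmpeg -i input.mp4 -b:v 1M -g 25 -b:a 128k output.mp4
-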
-
-The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vectors per macroblock (MPEG-4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be between -1 and
-69; the default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be between -1 and
-1024; the default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not automatically detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in Microsoft's broken decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, more strict version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk, do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non-compliances as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
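-For example (a sketch assuming a hypothetical input.ts), the flags above
-can be combined to abort on any detected corruption while discarding the
-decoded output:
-
ffmpeg -err_detect crccheck+bitstream+explode -i input.ts -f null -
-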
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use elsewise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
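-As an illustration (a sketch with a placeholder input.mp4), several debug
-flags can be combined while decoding to a null sink:
-
ffmpeg -debug qp+mv -i input.mp4 -f null -
-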
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set limit motion vectors range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non spec compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS )
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1= 1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identical to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames except keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
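-For example (a sketch with a placeholder input.mp4), decoding only the
-keyframes is a quick way to sanity-check a file:
-
ffmpeg -skip_frame nokey -i input.mp4 -f null -
-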
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non drop frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
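-As a sketch (with a placeholder input.mp4), slice-only threading with
-four threads can be requested as follows:
-
ffmpeg -threads 4 -thread_type slice -i input.mp4 -f null -
-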
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none .
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
12 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders
and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER
/
---disable-decoder=DECODER
.
-
-
The option -decoders
of the ff* tools will display the list of
-enabled decoders.
-
-
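-As a sketch of the workflow described above (the decoder names are only
-examples), a build can be restricted to a couple of decoders and the
-result inspected afterwards:
-
./configure --disable-decoders --enable-decoder=aac --enable-decoder=h264
ffmpeg -decoders
-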
-
-
13 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
13.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
13.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
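-For example (a sketch; the geometry, pixel format and file names are
-placeholders), a raw YUV file can be decoded by describing it to the
-rawvideo demuxer/decoder:
-
ffmpeg -f rawvideo -pixel_format yuv420p -video_size 1280x720 -framerate 25 -i input.yuv output.mp4
-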
-
-
-
-
-
14 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
14.1 ac3# TOC
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
14.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
-
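-For instance (a sketch with a placeholder input.ac3), DRC can be disabled
-by setting the option before the input file:
-
ffmpeg -drc_scale 0 -i input.ac3 output.wav
-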
-
-
-
-
14.2 ffwavesynth# TOC
-
-
-Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
14.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt .
-
-
-
14.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm .
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
14.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc .
-
-
-
14.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
14.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb .
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
14.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb .
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
14.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus .
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
15 Subtitles Decoders# TOC
-
-
-
15.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
15.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0 .
-
-
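-For example (a sketch with placeholder file names; the subtitle track is
-re-encoded so that the decoder option takes effect), forced-only decoding
-can be requested like this:
-
ffmpeg -forced_subs_only 1 -i input.mkv -c:v copy -c:a copy -c:s dvdsub output.mkv
-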
-
-
-
15.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi .
-
-
-
15.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text, you should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext based subtitles if
-your application can handle simple text based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext based subtitles where empty spaces may
-be present at the start or at the end of the lines or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
-
-
-
16 Encoders# TOC
-
-
Encoders are configured elements in FFmpeg which allow the encoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native encoders
-are enabled by default. Encoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available encoders using the configure option --list-encoders
.
-
-
You can disable all the encoders with the configure option
---disable-encoders
and selectively enable / disable single encoders
-with the options --enable-encoder=ENCODER
/
---disable-encoder=ENCODER
.
-
-
The option -encoders
of the ff* tools will display the list of
-enabled encoders.
-
-
-
-
17 Audio Encoders# TOC
-
-
A description of some of the currently available audio encoders
-follows.
-
-
-
17.1 aac# TOC
-
-
Advanced Audio Coding (AAC) encoder.
-
-
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
-low complexity (AAC-LC) profile is supported. To use this encoder, you must set
-the strict option to ‘experimental ’ or lower.
-
-
As this encoder is experimental, unexpected behavior may exist from time to
-time. For a more stable AAC encoder, see libvo-aacenc . However, be warned
-that some users report it to have worse quality.
-
-
See also libfdk_aac and libfaac .
-
-
-
17.1.1 Options# TOC
-
-
-b
-Set bit rate in bits/s. Setting this automatically activates constant bit rate
-(CBR) mode.
-
-
-q
-Set quality for variable bit rate (VBR) mode. This option is valid only using
-the ffmpeg
command-line tool. For library interface users, use
-global_quality .
-
-
-stereo_mode
-Set stereo encoding mode. Possible values:
-
-
-‘auto ’
-Automatically selected by the encoder.
-
-
-‘ms_off ’
-Disable middle/side encoding. This is the default.
-
-
-‘ms_force ’
-Force middle/side encoding.
-
-
-
-
-aac_coder
-Set AAC encoder coding method. Possible values:
-
-
-‘faac ’
-FAAC-inspired method.
-
-This method is a simplified reimplementation of the method used in FAAC, which
-sets thresholds proportional to the band energies, and then decreases all the
-thresholds with quantizer steps to find the appropriate quantization with
-distortion below threshold band by band.
-
-The quality of this method is comparable to the two loop searching method
-described below, but somewhat better and slower.
-
-
-‘anmr ’
-Average noise to mask ratio (ANMR) trellis-based solution.
-
-This has a theoretic best quality out of all the coding methods, but at the
-cost of the slowest speed.
-
-
-‘twoloop ’
-Two loop searching (TLS) method.
-
-This method first sets quantizers depending on band thresholds and then tries
-to find an optimal combination by adding or subtracting a specific value from
-all quantizers and adjusting some individual quantizer a little.
-
-This method produces similar quality with the FAAC method and is the default.
-
-
-‘fast ’
-Constant quantizer method.
-
-This method sets a constant quantizer for all bands. This is the fastest of all
-the methods, yet produces the worst quality.
-
-
-
-
-
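-For example (a sketch with placeholder file names), a 128 kbps CBR encode
-with this experimental encoder looks like:
-
ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k output.m4a
-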
-
-
-
-
17.2 ac3 and ac3_fixed# TOC
-
-
AC-3 audio encoders.
-
-
These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
The ac3 encoder uses floating-point math, while the ac3_fixed
-encoder only uses fixed-point integer math. This does not mean that one is
-always faster, just that one or the other may be better suited to a
-particular system. The floating-point encoder will generally produce better
-quality audio for a given bitrate. The ac3_fixed encoder is not the
-default codec for any of the output formats, so it must be specified explicitly
-using the option -acodec ac3_fixed in order to use it.
-
-
-
17.2.1 AC-3 Metadata# TOC
-
-
The AC-3 metadata options are used to set parameters that describe the audio,
-but in most cases do not affect the audio encoding itself. Some of the options
-do directly affect or influence the decoding and playback of the resulting
-bitstream, while others are just for informational purposes. A few of the
-options will add bits to the output stream that could otherwise be used for
-audio data, and will thus affect the quality of the output. Those will be
-indicated accordingly with a note in the option list below.
-
-
These parameters are described in detail in several publicly-available
-documents.
-
-
-
-
17.2.1.1 Metadata Control Options# TOC
-
-
--per_frame_metadata boolean
-Allow Per-Frame Metadata. Specifies if the encoder should check for changing
-metadata for each frame.
-
-0
-The metadata values set at initialization will be used for every frame in the
-stream. (default)
-
-1
-Metadata values can be changed before encoding each frame.
-
-
-
-
-
-
-
-
17.2.1.2 Downmix Levels# TOC
-
-
--center_mixlev level
-Center Mix Level. The amount of gain the decoder should apply to the center
-channel when downmixing to stereo. This field will only be written to the
-bitstream if a center channel is present. The value is specified as a scale
-factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6dB gain
-
-
-
-
--surround_mixlev level
-Surround Mix Level. The amount of gain the decoder should apply to the surround
-channel(s) when downmixing to stereo. This field will only be written to the
-bitstream if one or more surround channels are present. The value is specified
-as a scale factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.500
-Apply -6dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
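-For instance (a sketch assuming a hypothetical 5.1-channel input.wav),
-custom downmix levels can be written while encoding to AC-3:
-
ffmpeg -i input.wav -c:a ac3 -center_mixlev 0.707 -surround_mixlev 0.5 output.ac3
-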
-
-
-
-
17.2.1.3 Audio Production Information# TOC
-
Audio Production Information is optional information describing the mixing
-environment. Either none or both of the fields are written to the bitstream.
-
-
--mixing_level number
-Mixing Level. Specifies peak sound pressure level (SPL) in the production
-environment when the mix was mastered. Valid values are 80 to 111, or -1 for
-unknown or not indicated. The default value is -1, but that value cannot be
-used if the Audio Production Information is written to the bitstream. Therefore,
-if the room_type
option is not the default value, the mixing_level
-option must not be -1.
-
-
--room_type type
-Room Type. Describes the equalization used during the final mixing session at
-the studio or on the dubbing stage. A large room is a dubbing stage with the
-industry standard X-curve equalization; a small room has flat equalization.
-This field will not be written to the bitstream if both the mixing_level
-option and the room_type
option have the default values.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-large
-Large Room
-
-2
-small
-Small Room
-
-
-
-
-
-
-
-
17.2.1.4 Other Metadata Options# TOC
-
-
--copyright boolean
-Copyright Indicator. Specifies whether a copyright exists for this audio.
-
-0
-off
-No Copyright Exists (default)
-
-1
-on
-Copyright Exists
-
-
-
-
--dialnorm value
-Dialogue Normalization. Indicates how far the average dialogue level of the
-program is below digital 100% full scale (0 dBFS). This parameter determines a
-level shift during audio reproduction that sets the average volume of the
-dialogue to a preset level. The goal is to match volume level between program
-sources. A value of -31dB will result in no volume level change, relative to
-the source volume, during audio reproduction. Valid values are whole numbers in
-the range -31 to -1, with -31 being the default.
-
-
--dsur_mode mode
-Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround
-(Pro Logic). This field will only be written to the bitstream if the audio
-stream is stereo. Using this option does NOT mean the encoder will actually
-apply Dolby Surround processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Not Dolby Surround Encoded
-
-2
-on
-Dolby Surround Encoded
-
-
-
-
--original boolean
-Original Bit Stream Indicator. Specifies whether this audio is from the
-original source and not a copy.
-
-0
-off
-Not Original Source
-
-1
-on
-Original Source (default)
-
-
-
-
-
-
-
-
17.2.2 Extended Bitstream Information# TOC
-
The extended bitstream options are part of the Alternate Bit Stream Syntax as
-specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts.
-If any one parameter in a group is specified, all values in that group will be
-written to the bitstream. Default values are used for those that are written
-but have not been specified. If the mixing levels are written, the decoder
-will use these values instead of the ones specified in the center_mixlev
-and surround_mixlev options if it supports the Alternate Bit Stream
-Syntax.
-
-
-
17.2.2.1 Extended Bitstream Information - Part 1# TOC
-
-
--dmix_mode mode
-Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt
-(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-ltrt
-Lt/Rt Downmix Preferred
-
-2
-loro
-Lo/Ro Downmix Preferred
-
-
-
-
--ltrt_cmixlev level
-Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lt/Rt mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--ltrt_surmixlev level
-Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lt/Rt mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
--loro_cmixlev level
-Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lo/Ro mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--loro_surmixlev level
-Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lo/Ro mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
17.2.2.2 Extended Bitstream Information - Part 2# TOC
-
-
--dsurex_mode mode
-Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX
-(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually
-apply Dolby Surround EX processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-on
-Dolby Surround EX Off
-
-2
-off
-Dolby Surround EX On
-
-
-
-
--dheadphone_mode mode
-Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone
-encoding (multi-channel matrixed to 2.0 for use with headphones). Using this
-option does NOT mean the encoder will actually apply Dolby Headphone
-processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-on
-Dolby Headphone Off
-
-2
-off
-Dolby Headphone On
-
-
-
-
--ad_conv_type type
-A/D Converter Type. Indicates whether the audio has passed through HDCD A/D
-conversion.
-
-0
-standard
-Standard A/D Converter (default)
-
-1
-hdcd
-HDCD A/D Converter
-
-
-
-
-
-
-
-
17.2.3 Other AC-3 Encoding Options# TOC
-
-
--stereo_rematrixing boolean
-Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
-is an optional AC-3 feature that increases quality by selectively encoding
-the left/right channels as mid/side. This option is enabled by default, and it
-is highly recommended that it be left as enabled except for testing purposes.
-
-
-
-
-
-
17.2.4 Floating-Point-Only AC-3 Encoding Options# TOC
-
-
These options are only valid for the floating-point encoder and do not exist
-for the fixed-point encoder due to the corresponding features not being
-implemented in fixed-point.
-
-
--channel_coupling boolean
-Enables/Disables use of channel coupling, which is an optional AC-3 feature
-that increases quality by combining high frequency information from multiple
-channels into a single channel. The per-channel high frequency information is
-sent with less accuracy in both the frequency and time domains. This allows
-more bits to be used for lower frequencies while preserving enough information
-to reconstruct the high frequencies. This option is enabled by default for the
-floating-point encoder and should generally be left as enabled except for
-testing purposes or to increase encoding speed.
-
--1
-auto
-Selected by Encoder (default)
-
-0
-off
-Disable Channel Coupling
-
-1
-on
-Enable Channel Coupling
-
-
-
-
--cpl_start_band number
-Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a
-value higher than the bandwidth is used, it will be reduced to 1 less than the
-coupling end band. If auto is used, the start band will be determined by
-the encoder based on the bit rate, sample rate, and channel layout. This option
-has no effect if channel coupling is disabled.
-
--1
-auto
-Selected by Encoder (default)
-
-
-
-
-
-
-
-
17.3 libfaac# TOC
-
-
libfaac AAC (Advanced Audio Coding) encoder wrapper.
-
-
Requires the presence of the libfaac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfaac --enable-nonfree .
-
-
-This encoder is considered to be of higher quality with respect to the
-native experimental FFmpeg AAC encoder .
-
-
For more information see the libfaac project at
-http://www.audiocoding.com/faac.html/ .
-
-
-
17.3.1 Options# TOC
-
-
The following shared FFmpeg codec options are recognized.
-
-
-The following options are supported by the libfaac wrapper. The
-faac-equivalent options are listed in parentheses.
-
-
-b (-b )
-Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
-is not explicitly specified, it is automatically set to a suitable
-value depending on the selected profile. faac bitrate is
-expressed in kilobits/s.
-
-Note that libfaac does not support CBR (Constant Bit Rate) but only
-ABR (Average Bit Rate).
-
-If VBR mode is enabled this option is ignored.
-
-
-ar (-R )
-Set audio sampling rate (in Hz).
-
-
-ac (-c )
-Set the number of audio channels.
-
-
-cutoff (-C )
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_main ’
-Main AAC (Main)
-
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_ssr ’
-Scalable Sample Rate (SSR)
-
-
-‘aac_ltp ’
-Long Term Prediction (LTP)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-flags +qscale
-Set constant quality VBR (Variable Bit Rate) mode.
-
-
-global_quality
-Set quality in VBR mode as an integer number of lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale . The
-value is converted to QP units by dividing it by FF_QP2LAMBDA ,
-and used to set the quality value used by libfaac. A reasonable range
-for the option value in QP units is [10-500], the higher the value the
-higher the quality.
-
-
-q (-q )
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value sets the quality value used by libfaac. A reasonable range
-for the option value is [10-500], the higher the value the higher the
-quality.
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
17.3.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
-
-
- Use ffmpeg
to convert an audio file to VBR AAC, using the
-LTP AAC profile:
-
-
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
-
-
-
-
-
17.4 libfdk_aac# TOC
-
-
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
-
-
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
-the Android project.
-
-
Requires the presence of the libfdk-aac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfdk-aac . The library is also incompatible with GPL,
-so if you allow the use of GPL, you should configure with
---enable-gpl --enable-nonfree --enable-libfdk-aac .
-
-
This encoder is considered to be of higher quality with respect to
-both the native experimental FFmpeg AAC encoder and
-libfaac .
-
-
VBR encoding, enabled through the vbr or flags
-+qscale options, is experimental and only works with some
-combinations of parameters.
-
-
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
-higher.
-
-
For more information see the fdk-aac project at
-http://sourceforge.net/p/opencore-amr/fdk-aac/ .
-
-
-
17.4.1 Options# TOC
-
-
The following options are mapped on the shared FFmpeg codec options.
-
-
-b
-Set bit rate in bits/s. If the bitrate is not explicitly specified, it
-is automatically set to a suitable value depending on the selected
-profile.
-
-In case VBR mode is enabled the option is ignored.
-
-
-ar
-Set audio sampling rate (in Hz).
-
-
-channels
-Set the number of audio channels.
-
-
-flags +qscale
-Enable fixed quality, VBR (Variable Bit Rate) mode.
-Note that VBR is implicitly enabled when the vbr value is
-positive.
-
-
-cutoff
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_he ’
-High Efficiency AAC (HE-AAC)
-
-
-‘aac_he_v2 ’
-High Efficiency AAC version 2 (HE-AACv2)
-
-
-‘aac_ld ’
-Low Delay AAC (LD)
-
-
-‘aac_eld ’
-Enhanced Low Delay AAC (ELD)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-
-
The following are private options of the libfdk_aac encoder.
-
-
-afterburner
-Enable afterburner feature if set to 1, disabled if set to 0. This
-improves the quality but also the required processing power.
-
-Default value is 1.
-
-
-eld_sbr
-Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
-if set to 0.
-
-Default value is 0.
-
-
-signaling
-Set SBR/PS signaling style.
-
-It can assume one of the following values:
-
-‘default ’
-choose signaling implicitly (explicit hierarchical by default,
-implicit if global header is disabled)
-
-
-‘implicit ’
-implicit backwards compatible signaling
-
-
-‘explicit_sbr ’
-explicit SBR, implicit PS signaling
-
-
-‘explicit_hierarchical ’
-explicit hierarchical signaling
-
-
-
-Default value is ‘default ’.
-
-
-latm
-Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
-
-Default value is 0.
-
-
-header_period
-Set StreamMuxConfig and PCE repetition period (in frames) for sending
-in-band configuration buffers within LATM/LOAS transport layer.
-
-Must be a 16-bits non-negative integer.
-
-Default value is 0.
-
-
-vbr
-Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
-good) and 5 is highest quality. A value of 0 will disable VBR, and CBR
-(Constant Bit Rate) is enabled.
-
-Currently only the ‘aac_low ’ profile supports VBR encoding.
-
-VBR modes 1-5 correspond to roughly the following average bit rates:
-
-
-‘1 ’
-32 kbps/channel
-
-‘2 ’
-40 kbps/channel
-
-‘3 ’
-48-56 kbps/channel
-
-‘4 ’
-64 kbps/channel
-
-‘5 ’
-about 80-96 kbps/channel
-
-
-
-Default value is 0.
-
-
-
-
-
17.4.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to VBR AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
-
-
- Use ffmpeg
-to convert an audio file to CBR 64 kbps AAC, using the
-High-Efficiency AAC profile:
-
-
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
-
-
-
-
-
17.5 libmp3lame# TOC
-
-
LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper.
-
-
Requires the presence of the libmp3lame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libmp3lame
.
-
-
See libshine for a fixed-point MP3 encoder, although with a
-lower quality.
-
-
-
17.5.1 Options# TOC
-
-
-The following options are supported by the libmp3lame wrapper. The
-lame-equivalent options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate is
-expressed in kilobits/s.
-
-
-q (-V )
-Set constant quality setting for VBR. This option is valid only
-using the ffmpeg
command-line tool. For library interface
-users, use global_quality .
-
-
-compression_level (-q )
-Set algorithm quality. Valid arguments are integers in the 0-9 range,
-with 0 meaning highest quality but slowest, and 9 meaning fastest
-while producing the worst quality.
-
-
-reservoir
-Enable use of bit reservoir when set to 1. Default value is 1. LAME
-has this enabled by default, but it can be overridden by use of the
---nores option.
-
-
-joint_stereo (-m j )
-Enable the encoder to use (on a frame by frame basis) either L/R
-stereo or mid/side stereo. Default value is 1.
-
-
-abr (--abr )
-Enable the encoder to use ABR when set to 1. The lame --abr option
-sets the target bitrate, while this option only tells FFmpeg to use
-ABR and still relies on b to set the bitrate.
-
-
-
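-For example (a sketch with placeholder file names), a high-quality VBR
-encode can be requested by mapping LAME's -V 2 setting through the q
-option:
-
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
-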
-
-
-
17.6 libopencore-amrnb# TOC
-
-
OpenCORE Adaptive Multi-Rate Narrowband encoder.
-
-
Requires the presence of the libopencore-amrnb headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopencore-amrnb --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
-but you can override it by setting strict to ‘unofficial ’ or
-lower.
-
-
-
17.6.1 Options# TOC
-
-
-b
-Set bitrate in bits per second. Only the following bitrates are supported,
-otherwise libavcodec will round to the nearest valid bitrate.
-
-
-4750
-5150
-5900
-6700
-7400
-7950
-10200
-12200
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
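-
As an illustrative sketch (not part of the original documentation), encoding
-mono 8000 Hz audio at one of the supported bitrates might look like:
-
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 12.2k output.amr
-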
-
-
-
17.7 libshine# TOC
-
-
Shine Fixed-Point MP3 encoder wrapper.
-
-
-Shine is a fixed-point MP3 encoder. It performs far better on
-platforms without an FPU, e.g. armel CPUs, and some phones and tablets.
-However, as it targets performance rather than quality, it is not on par
-with LAME and other production-grade encoders quality-wise. Also, according to
-the project’s homepage, this encoder may not be free of bugs as the code was
-written a long time ago and the project was dead for at least 5 years.
-
-
This encoder only supports stereo and mono input. It is also CBR-only.
-
-
The original project (last updated in early 2007) is at
-http://sourceforge.net/projects/libshine-fxp/ . We only support the
-updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine .
-
-
Requires the presence of the libshine headers and library during
-configuration. You need to explicitly configure the build with
---enable-libshine
.
-
-
See also libmp3lame .
-
-
-
17.7.1 Options# TOC
-
-
The following options are supported by the libshine wrapper. The
-shineenc
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. shineenc ’s -b option
-is expressed in kilobits/s.
-
-
-
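-
As an illustrative sketch (not part of the original documentation), a CBR
-encode with the libshine wrapper might look like:
-
ffmpeg -i input.wav -c:a libshine -b:a 128k output.mp3
-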
-
-
-
17.8 libtwolame# TOC
-
-
TwoLAME MP2 encoder wrapper.
-
-
Requires the presence of the libtwolame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtwolame
.
-
-
-
17.8.1 Options# TOC
-
-
The following options are supported by the libtwolame wrapper. The
-twolame
-equivalent options follow the FFmpeg ones and are in
-parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. twolame ’s b
-option is expressed in kilobits/s. Default value is 128k.
-
-
-q (-V )
-Set quality for experimental VBR support. Maximum value range is
-from -50 to 50, useful range is from -10 to 10. The higher the
-value, the better the quality. This option is valid only using the
-ffmpeg
command-line tool. For library interface users,
-use global_quality .
-
-
-mode (--mode )
-Set the mode of the resulting audio. Possible values:
-
-
-‘auto ’
-Choose mode automatically based on the input. This is the default.
-
-‘stereo ’
-Stereo
-
-‘joint_stereo ’
-Joint stereo
-
-‘dual_channel ’
-Dual channel
-
-‘mono ’
-Mono
-
-
-
-
-psymodel (--psyc-mode )
-Set psychoacoustic model to use in encoding. The argument must be
-an integer between -1 and 4, inclusive. The higher the value, the
-better the quality. The default value is 3.
-
-
-energy_levels (--energy )
-Enable energy levels extensions when set to 1. The default value is
-0 (disabled).
-
-
-error_protection (--protect )
-Enable CRC error protection when set to 1. The default value is 0
-(disabled).
-
-
-copyright (--copyright )
-Set MPEG audio copyright flag when set to 1. The default value is 0
-(disabled).
-
-
-original (--original )
-Set MPEG audio original flag when set to 1. The default value is 0
-(disabled).
-
-
-
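-
As an illustrative sketch (not part of the original documentation), a simple
-CBR MP2 encode with the libtwolame wrapper might look like:
-
ffmpeg -i input.wav -c:a libtwolame -b:a 192k output.mp2
-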
-
-
-
17.9 libvo-aacenc# TOC
-
-
VisualOn AAC encoder.
-
-
Requires the presence of the libvo-aacenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-aacenc --enable-version3
.
-
-
This encoder is considered to be worse than the
-native experimental FFmpeg AAC encoder , according to
-multiple sources.
-
-
-
17.9.1 Options# TOC
-
-
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
-channels. It is also CBR-only.
-
-
-b
-Set bit rate in bits/s.
-
-
-
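-
As an illustrative sketch (not part of the original documentation), a CBR
-AAC-LC encode with this wrapper might look like:
-
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a
-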
-
-
-
17.10 libvo-amrwbenc# TOC
-
-
VisualOn Adaptive Multi-Rate Wideband encoder.
-
-
Requires the presence of the libvo-amrwbenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-amrwbenc --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 16000Hz sample
-rate, but you can override it by setting strict to
-‘unofficial ’ or lower.
-
-
-
17.10.1 Options# TOC
-
-
-b
-Set bitrate in bits/s. Only the following bitrates are supported, otherwise
-libavcodec will round to the nearest valid bitrate.
-
-
-‘6600 ’
-‘8850 ’
-‘12650 ’
-‘14250 ’
-‘15850 ’
-‘18250 ’
-‘19850 ’
-‘23050 ’
-‘23850 ’
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
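-
As an illustrative sketch (not part of the original documentation), encoding
-mono 16000 Hz audio at one of the supported bitrates might look like:
-
ffmpeg -i input.wav -ar 16000 -ac 1 -c:a libvo_amrwbenc -b:a 23.85k output.amr
-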
-
-
-
-
17.11 libopus# TOC
-
-
libopus Opus Interactive Audio Codec encoder wrapper.
-
-
Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
-
17.11.1 Option Mapping# TOC
-
-
Most libopus options are modelled after the opusenc
utility from
-opus-tools. The following is an option mapping chart describing options
-supported by the libopus wrapper, and their opusenc
-equivalent
-in parentheses.
-
-
-b (bitrate )
-Set the bit rate in bits/s. FFmpeg’s b option is
-expressed in bits/s, while opusenc ’s bitrate is in
-kilobits/s.
-
-
-vbr (vbr , hard-cbr , and cvbr )
-Set VBR mode. The FFmpeg vbr option has the following
-valid arguments, with their opusenc equivalent options
-in parentheses:
-
-
-‘off (hard-cbr ) ’
-Use constant bit rate encoding.
-
-
-‘on (vbr ) ’
-Use variable bit rate encoding (the default).
-
-
-‘constrained (cvbr ) ’
-Use constrained variable bit rate encoding.
-
-
-
-
-compression_level (comp )
-Set encoding algorithm complexity. Valid options are integers in
-the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
-gives the highest quality but slowest encoding. The default is 10.
-
-
-frame_duration (framesize )
-Set maximum frame size, or duration of a frame in milliseconds. The
-argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller
-frame sizes achieve lower latency but less quality at a given bitrate.
-Sizes greater than 20ms are only interesting at fairly low bitrates.
-The default is 20ms.
-
-
-packet_loss (expect-loss )
-Set expected packet loss percentage. The default is 0.
-
-
-application (N.A.)
-Set intended application type. Valid options are listed below:
-
-
-‘voip ’
-Favor improved speech intelligibility.
-
-‘audio ’
-Favor faithfulness to the input (the default).
-
-‘lowdelay ’
-Restrict to only the lowest delay modes.
-
-
-
-
-cutoff (N.A.)
-Set cutoff bandwidth in Hz. The argument must be exactly one of the
-following: 4000, 6000, 8000, 12000, or 20000, corresponding to
-narrowband, mediumband, wideband, super wideband, and fullband
-respectively. The default is 0 (cutoff disabled).
-
-
-
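-
As an illustrative sketch (not part of the original documentation), a typical
-VBR Opus encode might look like:
-
ffmpeg -i input.wav -c:a libopus -b:a 96k -vbr on output.opus
-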
-
-
-
17.12 libvorbis# TOC
-
-
libvorbis encoder wrapper.
-
-
Requires the presence of the libvorbisenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvorbis
.
-
-
-
17.12.1 Options# TOC
-
-
The following options are supported by the libvorbis wrapper. The
-oggenc
-equivalent of the options are listed in parentheses.
-
-
For more accurate and extensive documentation of the libvorbis
-options, consult the libvorbisenc and oggenc documentation.
-See http://xiph.org/vorbis/ ,
-http://wiki.xiph.org/Vorbis-tools , and oggenc(1).
-
-
-b (-b )
-Set bitrate expressed in bits/s for ABR. oggenc ’s -b is
-expressed in kilobits/s.
-
-
-q (-q )
-Set constant quality setting for VBR. The value should be a float
-number in the range of -1.0 to 10.0. The higher the value, the better
-the quality. The default value is ‘3.0 ’.
-
-This option is valid only using the ffmpeg
command-line tool.
-For library interface users, use global_quality .
-
-
-cutoff (--advanced-encode-option lowpass_frequency=N )
-Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc
’s
-related option is expressed in kHz. The default value is ‘0 ’ (cutoff
-disabled).
-
-
-minrate (-m )
-Set minimum bitrate expressed in bits/s. oggenc
-m is
-expressed in kilobits/s.
-
-
-maxrate (-M )
-Set maximum bitrate expressed in bits/s. oggenc
-M is
-expressed in kilobits/s. This only has effect on ABR mode.
-
-
-iblock (--advanced-encode-option impulse_noisetune=N )
-Set noise floor bias for impulse blocks. The value is a float number from
--15.0 to 0.0. A negative bias instructs the encoder to pay special attention
-to the crispness of transients in the encoded audio. The tradeoff for better
-transient response is a higher bitrate.
-
-
-
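-
As an illustrative sketch (not part of the original documentation), a
-quality-based VBR Vorbis encode might look like:
-
ffmpeg -i input.wav -c:a libvorbis -q:a 5 output.ogg
-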
-
-
-
17.13 libwavpack# TOC
-
-
A wrapper providing WavPack encoding through libwavpack.
-
-
Only lossless mode using 32-bit integer samples is supported currently.
-
-
Requires the presence of the libwavpack headers and library during
-configuration. You need to explicitly configure the build with
---enable-libwavpack
.
-
-
Note that a libavcodec-native encoder for the WavPack codec exists so users can
-encode audio with this codec without using this encoder. See wavpackenc .
-
-
-
17.13.1 Options# TOC
-
-
wavpack
command line utility’s corresponding options are listed in
-parentheses, if any.
-
-
-frame_size (--blocksize )
-Default is 32768.
-
-
-compression_level
-Set speed vs. compression tradeoff. Acceptable arguments are listed below:
-
-
-‘0 (-f ) ’
-Fast mode.
-
-
-‘1 ’
-Normal (default) settings.
-
-
-‘2 (-h ) ’
-High quality.
-
-
-‘3 (-hh ) ’
-Very high quality.
-
-
-‘4-8 (-hh -x EXTRAPROC ) ’
-Same as ‘3 ’, but with extra processing enabled.
-
-‘4 ’ is the same as -x2 and ‘8 ’ is the same as -x6 .
-
-
-
-
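-
As an illustrative sketch (not part of the original documentation), a high
-quality lossless encode with the libwavpack wrapper might look like:
-
ffmpeg -i input.wav -c:a libwavpack -compression_level 2 output.wv
-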
-
-
-
-
17.14 wavpack# TOC
-
-
WavPack lossless audio encoder.
-
-
This is a libavcodec-native WavPack encoder. There is also an encoder based on
-libwavpack, but there is virtually no reason to use that encoder.
-
-
See also libwavpack .
-
-
-
17.14.1 Options# TOC
-
-
The equivalent options for wavpack
command line utility are listed in
-parentheses.
-
-
-
17.14.1.1 Shared options# TOC
-
-
The following shared options are effective for this encoder. Only special notes
-about this particular encoder will be documented here. For the general meaning
-of the options, see the Codec Options chapter .
-
-
-frame_size (--blocksize )
-For this encoder, the range for this option is between 128 and 131072. Default
-is automatically decided based on sample rate and number of channels.
-
-For the complete formula of calculating default, see
-libavcodec/wavpackenc.c .
-
-
-compression_level (-f , -h , -hh , and -x )
-This option’s syntax is consistent with libwavpack ’s.
-
-
-
-
-
17.14.1.2 Private options# TOC
-
-
-joint_stereo (-j )
-Set whether to enable joint stereo. Valid values are:
-
-
-‘on (1 ) ’
-Force mid/side audio encoding.
-
-‘off (0 ) ’
-Force left/right audio encoding.
-
-‘auto ’
-Let the encoder decide automatically.
-
-
-
-
-optimize_mono
-Set whether to enable optimization for mono. This option is only effective for
-non-mono streams. Available values:
-
-
-‘on ’
-enabled
-
-‘off ’
-disabled
-
-
-
-
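-
As an illustrative sketch (not part of the original documentation), an encode
-with the native encoder letting it pick the stereo mode might look like:
-
ffmpeg -i input.wav -c:a wavpack -joint_stereo auto output.wv
-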
-
-
-
-
-
18 Video Encoders# TOC
-
-
A description of some of the currently available video encoders
-follows.
-
-
-
18.1 libtheora# TOC
-
-
libtheora Theora encoder wrapper.
-
-
Requires the presence of the libtheora headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtheora
.
-
-
For more information about the libtheora project see
-http://www.theora.org/ .
-
-
-
18.1.1 Options# TOC
-
-
The following global options are mapped to internal libtheora options
-which affect the quality and the bitrate of the encoded stream.
-
-
-b
-Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
-case VBR (Variable Bit Rate) mode is enabled this option is ignored.
-
-
-flags
-Used to enable constant quality mode (VBR) encoding through the
-qscale flag, and to enable the pass1
and pass2
-modes.
-
-
-g
-Set the GOP size.
-
-
-global_quality
-Set the global quality as an integer in lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
-value in the native libtheora range [0-63]. A higher value corresponds
-to a higher quality.
-
-
-q
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value is clipped in the [0-10] range, and then multiplied by 6.3
-to get a value in the native libtheora range [0-63].
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
18.1.2 Examples# TOC
-
-
- Set maximum constant quality (VBR) encoding with ffmpeg
:
-
-
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
-
-
- Use ffmpeg
to convert a CBR 1000 kbps Theora video stream:
-
-
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
-
-
-
-
-
18.2 libvpx# TOC
-
-
VP8/VP9 format supported through libvpx.
-
-
Requires the presence of the libvpx headers and library during configuration.
-You need to explicitly configure the build with --enable-libvpx
.
-
-
-
18.2.1 Options# TOC
-
-
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
-
-
-threads
-g_threads
-
-
-profile
-g_profile
-
-
-vb
-rc_target_bitrate
-
-
-g
-kf_max_dist
-
-
-keyint_min
-kf_min_dist
-
-
-qmin
-rc_min_quantizer
-
-
-qmax
-rc_max_quantizer
-
-
-bufsize, vb
-rc_buf_sz
-(bufsize * 1000 / vb)
-
-rc_buf_optimal_sz
-(bufsize * 1000 / vb * 5 / 6)
-
-
-rc_init_occupancy, vb
-rc_buf_initial_sz
-(rc_init_occupancy * 1000 / vb)
-
-
-rc_buffer_aggressivity
-rc_undershoot_pct
-
-
-skip_threshold
-rc_dropframe_thresh
-
-
-qcomp
-rc_2pass_vbr_bias_pct
-
-
-maxrate, vb
-rc_2pass_vbr_maxsection_pct
-(maxrate * 100 / vb)
-
-
-minrate, vb
-rc_2pass_vbr_minsection_pct
-(minrate * 100 / vb)
-
-
-minrate, maxrate, vb
-VPX_CBR
-(minrate == maxrate == vb)
-
-
-crf
-VPX_CQ
, VP8E_SET_CQ_LEVEL
-
-
-quality
-
-best
-VPX_DL_BEST_QUALITY
-
-good
-VPX_DL_GOOD_QUALITY
-
-realtime
-VPX_DL_REALTIME
-
-
-
-
-speed
-VP8E_SET_CPUUSED
-
-
-nr
-VP8E_SET_NOISE_SENSITIVITY
-
-
-mb_threshold
-VP8E_SET_STATIC_THRESHOLD
-
-
-slices
-VP8E_SET_TOKEN_PARTITIONS
-
-
-max-intra-rate
-VP8E_SET_MAX_INTRA_BITRATE_PCT
-
-
-force_key_frames
-VPX_EFLAG_FORCE_KF
-
-
-Alternate reference frame related
-
-vp8flags altref
-VP8E_SET_ENABLEAUTOALTREF
-
-arnr_max_frames
-VP8E_SET_ARNR_MAXFRAMES
-
-arnr_type
-VP8E_SET_ARNR_TYPE
-
-arnr_strength
-VP8E_SET_ARNR_STRENGTH
-
-rc_lookahead
-g_lag_in_frames
-
-
-
-
-vp8flags error_resilient
-g_error_resilient
-
-
-aq_mode
-VP9E_SET_AQ_MODE
-
-
-
-
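-
As an illustrative sketch (not part of the original documentation), a
-constrained-quality VP8 encode using the mapped options might look like:
-
ffmpeg -i input.mp4 -c:v libvpx -b:v 1M -crf 10 -c:a libvorbis output.webm
-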
-
For more information about libvpx see:
-http://www.webmproject.org/
-
-
-
-
18.3 libwebp# TOC
-
-
libwebp WebP Image encoder wrapper
-
-
libwebp is Google’s official encoder for WebP images. It can encode in either
-lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
-frame. Lossless images are a separate codec developed by Google.
-
-
-
18.3.1 Pixel Format# TOC
-
-
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
-to limitations of the format and libwebp. Alpha is supported for either mode.
-Because of API limitations, if RGB is passed in when encoding lossy or YUV is
-passed in for encoding lossless, the pixel format will automatically be
-converted using functions from libwebp. This is not ideal and is done only for
-convenience.
-
-
-
18.3.2 Options# TOC
-
-
--lossless boolean
-Enables/Disables use of lossless mode. Default is 0.
-
-
--compression_level integer
-For lossy, this is a quality/speed tradeoff. Higher values give better quality
-for a given size at the cost of increased encoding time. For lossless, this is
-a size/speed tradeoff. Higher values give smaller size at the cost of increased
-encoding time. More specifically, it controls the number of extra algorithms
-and compression tools used, and varies the combination of these tools. This
-maps to the method option in libwebp. The valid range is 0 to 6.
-Default is 4.
-
-
--qscale float
-For lossy encoding, this controls image quality, 0 to 100. For lossless
-encoding, this controls the effort and time spent at compressing more. The
-default value is 75. Note that for usage via libavcodec, this option is called
-global_quality and must be multiplied by FF_QP2LAMBDA .
-
-
--preset type
-Configuration preset. This does some automatic settings based on the general
-type of the image.
-
-none
-Do not use a preset.
-
-default
-Use the encoder default.
-
-picture
-Digital picture, like portrait, inner shot
-
-photo
-Outdoor photograph, with natural lighting
-
-drawing
-Hand or line drawing, with high-contrast details
-
-icon
-Small-sized colorful images
-
-text
-Text-like
-
-
-
-
-
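-
As an illustrative sketch (not part of the original documentation), a lossy
-encode of a single image with a preset might look like:
-
ffmpeg -i input.png -c:v libwebp -q:v 80 -preset photo output.webp
-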
-
-
-
18.4 libx264, libx264rgb# TOC
-
-
x264 H.264/MPEG-4 AVC encoder wrapper.
-
-
This encoder requires the presence of the libx264 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx264
.
-
-
libx264 supports an impressive number of features, including 8x8 and
-4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
-entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
-for detail retention (adaptive quantization, psy-RD, psy-trellis).
-
-
Many libx264 encoder options are mapped to FFmpeg global codec
-options, while unique encoder options are provided through private
-options. Additionally the x264opts and x264-params
-private options allow one to pass a list of key=value tuples as accepted
-by the libx264 x264_param_parse
function.
-
-
The x264 project website is at
-http://www.videolan.org/developers/x264.html .
-
-
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
-pixel formats as input instead of YUV.
-
-
-
18.4.1 Supported Pixel Formats# TOC
-
-
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
-x264’s configure time. FFmpeg only supports one bit depth in one particular
-build. In other words, it is not possible to build one FFmpeg with multiple
-versions of x264 with different bit depths.
-
-
-
18.4.2 Options# TOC
-
-
The following options are supported by the libx264 wrapper. The
-x264
-equivalent options or values are listed in parentheses
-for easy migration.
-
-
To reduce the duplication of documentation, only the private options
-and some others requiring special attention are documented here. For
-the documentation of the undocumented generic options, see
-the Codec Options chapter .
-
-
To get a more accurate and extensive documentation of the libx264
-options, invoke the command x264 --full-help
or consult
-the libx264 documentation.
-
-
-b (bitrate )
-Set bitrate in bits/s. Note that FFmpeg’s b option is
-expressed in bits/s, while x264
’s bitrate is in
-kilobits/s.
-
-
-bf (bframes )
-g (keyint )
-qmin (qpmin )
-Minimum quantizer scale.
-
-
-qmax (qpmax )
-Maximum quantizer scale.
-
-
-qdiff (qpstep )
-Maximum difference between quantizer scales.
-
-
-qblur (qblur )
-Quantizer curve blur
-
-
-qcomp (qcomp )
-Quantizer curve compression factor
-
-
-refs (ref )
-Number of reference frames each P-frame can use. The range is from 0-16 .
-
-
-sc_threshold (scenecut )
-Sets the threshold for the scene change detection.
-
-
-trellis (trellis )
-Performs Trellis quantization to increase efficiency. Enabled by default.
-
-
-nr (nr )
-me_range (merange )
-Maximum range of the motion search in pixels.
-
-
-me_method (me )
-Set motion estimation method. Possible values in the decreasing order
-of speed:
-
-
-‘dia (dia ) ’
-‘epzs (dia ) ’
-Diamond search with radius 1 (fastest). ‘epzs ’ is an alias for
-‘dia ’.
-
-‘hex (hex ) ’
-Hexagonal search with radius 2.
-
-‘umh (umh ) ’
-Uneven multi-hexagon search.
-
-‘esa (esa ) ’
-Exhaustive search.
-
-‘tesa (tesa ) ’
-Hadamard exhaustive search (slowest).
-
-
-
-
-subq (subme )
-Sub-pixel motion estimation method.
-
-
-b_strategy (b-adapt )
-Adaptive B-frame placement decision algorithm. Use only on first-pass.
-
-
-keyint_min (min-keyint )
-Minimum GOP size.
-
-
-coder
-Set entropy encoder. Possible values:
-
-
-‘ac ’
-Enable CABAC.
-
-
-‘vlc ’
-Enable CAVLC and disable CABAC. It generates the same effect as
-x264
’s --no-cabac option.
-
-
-
-
-cmp
-Set full-pixel motion estimation comparison algorithm. Possible values:
-
-
-‘chroma ’
-Enable chroma in motion estimation.
-
-
-‘sad ’
-Ignore chroma in motion estimation. It generates the same effect as
-x264
’s --no-chroma-me option.
-
-
-
-
-threads (threads )
-Number of encoding threads.
-
-
-thread_type
-Set multithreading technique. Possible values:
-
-
-‘slice ’
-Slice-based multithreading. It generates the same effect as
-x264
’s --sliced-threads option.
-
-‘frame ’
-Frame-based multithreading.
-
-
-
-
-flags
-Set encoding flags. It can be used to disable closed GOP and enable
-open GOP by setting it to -cgop
. The result is similar to
-the behavior of x264
’s --open-gop option.
-
-
-rc_init_occupancy (vbv-init )
-preset (preset )
-Set the encoding preset.
-
-
-tune (tune )
-Set tuning of the encoding params.
-
-
-profile (profile )
-Set profile restrictions.
-
-
-fastfirstpass
-Enable fast settings when encoding the first pass, when set to 1. When set
-to 0, it has the same effect as x264 ’s
---slow-firstpass option.
-
-
-crf (crf )
-Set the quality for constant quality mode.
-
-
-crf_max (crf-max )
-In CRF mode, prevents VBV from lowering quality beyond this point.
-
-
-qp (qp )
-Set constant quantization rate control method parameter.
-
-
-aq-mode (aq-mode )
-Set AQ method. Possible values:
-
-
-‘none (0 ) ’
-Disabled.
-
-
-‘variance (1 ) ’
-Variance AQ (complexity mask).
-
-
-‘autovariance (2 ) ’
-Auto-variance AQ (experimental).
-
-
-
-
-aq-strength (aq-strength )
-Set AQ strength, reduce blocking and blurring in flat and textured areas.
-
-
-psy
-Use psychovisual optimizations when set to 1. When set to 0, it has the
-same effect as x264
’s --no-psy option.
-
-
-psy-rd (psy-rd )
-Set strength of psychovisual optimization, in
-psy-rd :psy-trellis format.
-
-
-rc-lookahead (rc-lookahead )
-Set number of frames to look ahead for frametype and ratecontrol.
-
-
-weightb
-Enable weighted prediction for B-frames when set to 1. When set to 0,
-it has the same effect as x264
’s --no-weightb option.
-
-
-weightp (weightp )
-Set weighted prediction method for P-frames. Possible values:
-
-
-‘none (0 ) ’
-Disabled
-
-‘simple (1 ) ’
-Enable only weighted refs
-
-‘smart (2 ) ’
-Enable both weighted refs and duplicates
-
-
-
-
-ssim (ssim )
-Enable calculation and printing SSIM stats after the encoding.
-
-
-intra-refresh (intra-refresh )
-Enable the use of Periodic Intra Refresh instead of IDR frames when set
-to 1.
-
-
-avcintra-class (class )
-Configure the encoder to generate AVC-Intra.
-Valid values are 50, 100 and 200.
-
-
-bluray-compat (bluray-compat )
-Configure the encoder to be compatible with the Blu-ray standard.
-It is a shorthand for setting "bluray-compat=1 force-cfr=1".
-
-
-b-bias (b-bias )
-Set the influence on how often B-frames are used.
-
-
-b-pyramid (b-pyramid )
-Set method for keeping of some B-frames as references. Possible values:
-
-
-‘none (none ) ’
-Disabled.
-
-‘strict (strict ) ’
-Strictly hierarchical pyramid.
-
-‘normal (normal ) ’
-Non-strict (not Blu-ray compatible).
-
-
-
-
-mixed-refs
-Enable the use of one reference per partition, as opposed to one
-reference per macroblock when set to 1. When set to 0, it has the
-same effect as x264
’s --no-mixed-refs option.
-
-
-8x8dct
-Enable adaptive spatial transform (high profile 8x8 transform)
-when set to 1. When set to 0, it has the same effect as
-x264
’s --no-8x8dct option.
-
-
-fast-pskip
-Enable early SKIP detection on P-frames when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-fast-pskip option.
-
-
-aud (aud )
-Enable use of access unit delimiters when set to 1.
-
-
-mbtree
-Enable use of macroblock tree rate control when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-mbtree option.
-
-
-deblock (deblock )
-Set loop filter parameters, in alpha :beta form.
-
-
-cplxblur (cplxblur )
-Set fluctuations reduction in QP (before curve compression).
-
-
-partitions (partitions )
-Set partitions to consider, as a comma-separated list. Possible
-values in the list:
-
-
-‘p8x8 ’
-8x8 P-frame partition.
-
-‘p4x4 ’
-4x4 P-frame partition.
-
-‘b8x8 ’
-8x8 B-frame partition.
-
-‘i8x8 ’
-8x8 I-frame partition.
-
-‘i4x4 ’
-4x4 I-frame partition.
-(Enabling ‘p4x4 ’ requires ‘p8x8 ’ to be enabled. Enabling
-‘i8x8 ’ requires adaptive spatial transform (8x8dct
-option) to be enabled.)
-
-‘none (none ) ’
-Do not consider any partitions.
-
-‘all (all ) ’
-Consider every partition.
-
-
-
-
-direct-pred (direct )
-Set direct MV prediction mode. Possible values:
-
-
-‘none (none ) ’
-Disable MV prediction.
-
-‘spatial (spatial ) ’
-Enable spatial predicting.
-
-‘temporal (temporal ) ’
-Enable temporal predicting.
-
-‘auto (auto ) ’
-Automatically decided.
-
-
-
-
-slice-max-size (slice-max-size )
-Set the limit of the size of each slice in bytes. If not specified
-but RTP payload size (ps ) is specified, that is used.
-
-
-stats (stats )
-Set the file name for multi-pass stats.
-
-
-nal-hrd (nal-hrd )
-Set signal HRD information (requires vbv-bufsize to be set).
-Possible values:
-
-
-‘none (none ) ’
-Disable HRD information signaling.
-
-‘vbr (vbr ) ’
-Variable bit rate.
-
-‘cbr (cbr ) ’
-Constant bit rate (not allowed in MP4 container).
-
-
-
-
-x264opts (N.A.)
-Set any x264 option, see x264 --fullhelp
for a list.
-
-Argument is a list of key =value couples separated by
-":". In filter and psy-rd options that use ":" as a separator
-themselves, use "," instead. They have accepted "," as a separator for a
-long time, but this is kept undocumented for some reason.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
-
-
-
-x264-params (N.A.)
-Override the x264 configuration using a :-separated list of key=value
-parameters.
-
-This option is functionally the same as the x264opts , but is
-duplicated for compatibility with the Libav fork.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
-cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
-no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
-
-
-
-
-
Encoding ffpresets for common usages are provided so they can be used with the
-general presets system (e.g. passing the pre option).
-
-
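-
As an illustrative sketch (not part of the original documentation), a common
-constant-quality encode using the preset and crf options might
-look like:
-
ffmpeg -i INPUT -c:v libx264 -preset slow -crf 22 -c:a copy OUTPUT.mkv
-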
-
18.5 libx265# TOC
-
-
x265 H.265/HEVC encoder wrapper.
-
-
This encoder requires the presence of the libx265 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx265 .
-
-
-
18.5.1 Options# TOC
-
-
-preset
-Set the x265 preset.
-
-
-tune
-Set the x265 tune parameter.
-
-
-x265-params
-Set x265 options using a list of key =value couples separated
-by ":". See x265 --help
for a list of options.
-
-For example to specify libx265 encoding options with -x265-params :
-
-
-
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
-
-
-
-
-
-
18.6 libxvid# TOC
-
-
Xvid MPEG-4 Part 2 encoder wrapper.
-
-
This encoder requires the presence of the libxvidcore headers and library
-during configuration. You need to explicitly configure the build with
---enable-libxvid --enable-gpl
.
-
-
The native mpeg4
encoder supports the MPEG-4 Part 2 format, so
-users can encode to this format without this library.
-
-
-
18.6.1 Options# TOC
-
-
The following options are supported by the libxvid wrapper. Some of
-the following options are listed but are not documented, and
-correspond to shared codec options. See the Codec
-Options chapter for their documentation. The other shared options
-which are not listed have no effect for the libxvid encoder.
-
-
-b
-g
-qmin
-qmax
-mpeg_quant
-threads
-bf
-b_qfactor
-b_qoffset
-flags
-Set specific encoding flags. Possible values:
-
-
-‘mv4 ’
-Use four motion vector by macroblock.
-
-
-‘aic ’
-Enable high quality AC prediction.
-
-
-‘gray ’
-Only encode grayscale.
-
-
-‘gmc ’
-Enable the use of global motion compensation (GMC).
-
-
-‘qpel ’
-Enable quarter-pixel motion compensation.
-
-
-‘cgop ’
-Enable closed GOP.
-
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-
-
-
-
-trellis
-me_method
-Set motion estimation method. Possible values in decreasing order of
-speed and increasing order of quality:
-
-
-‘zero ’
-Use no motion estimation (default).
-
-
-‘phods ’
-‘x1 ’
-‘log ’
-Enable advanced diamond zonal search for 16x16 blocks and half-pixel
-refinement for 16x16 blocks. ‘x1 ’ and ‘log ’ are aliases for
-‘phods ’.
-
-
-‘epzs ’
-Enable all of the things described above, plus advanced diamond zonal
-search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
-estimation on chroma planes.
-
-
-‘full ’
-Enable all of the things described above, plus extended 16x16 and 8x8
-blocks search.
-
-
-
-
-mbd
-Set macroblock decision algorithm. Possible values in the increasing
-order of quality:
-
-
-‘simple ’
-Use macroblock comparing function algorithm (default).
-
-
-‘bits ’
-Enable rate distortion-based half pixel and quarter pixel refinement for
-16x16 blocks.
-
-
-‘rd ’
-Enable all of the things described above, plus rate distortion-based
-half pixel and quarter pixel refinement for 8x8 blocks, and rate
-distortion-based search using square pattern.
-
-
-
-
-lumi_aq
-Enable lumi masking adaptive quantization when set to 1. Default is 0
-(disabled).
-
-
-variance_aq
-Enable variance adaptive quantization when set to 1. Default is 0
-(disabled).
-
-When combined with lumi_aq , the resulting quality will not
-be better than either of the two specified individually. In other
-words, the resulting quality will be the worse of the two
-effects.
-
-
-ssim
-Set structural similarity (SSIM) displaying method. Possible values:
-
-
-‘off ’
-Disable displaying of SSIM information.
-
-
-‘avg ’
-Output average SSIM at the end of encoding to stdout. The format of
-showing the average SSIM is:
-
-
-
-For users who are not familiar with C, %f means a float number, or
-a decimal (e.g. 0.939232).
-
-
-‘frame ’
-Output both per-frame SSIM data during encoding and average SSIM at
-the end of encoding to stdout. The format of per-frame information
-is:
-
-
-
SSIM: avg: %1.3f min: %1.3f max: %1.3f
-
-
-For users who are not familiar with C, %1.3f means a float number
-rounded to 3 digits after the dot (e.g. 0.932).
-
-
-
-
-
-ssim_acc
-Set SSIM accuracy. Valid options are integers within the range of
-0-4, while 0 gives the most accurate result and 4 computes the
-fastest.
-
-
-
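-
As an illustrative sketch (not part of the original documentation), a
-quality-based encode using some of the flags described above might look like:
-
ffmpeg -i input.mp4 -an -c:v libxvid -q:v 5 -flags +mv4+aic -mbd rd output.avi
-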
-
-
-
18.7 mpeg2# TOC
-
-
MPEG-2 video encoder.
-
-
-
18.7.1 Options# TOC
-
-
-seq_disp_ext integer
-Specifies if the encoder should write a sequence_display_extension to the
-output.
-
--1
-auto
-Decide automatically to write it or not (this is the default) by checking if
-the data to be written is different from the default or unspecified values.
-
-0
-never
-Never write it.
-
-1
-always
-Always write it.
-
-
-
-
-
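-
As an illustrative sketch (not part of the original documentation), forcing
-the extension to be written with the mpeg2video encoder might look like:
-
ffmpeg -i input.mov -c:v mpeg2video -b:v 5M -seq_disp_ext always output.mpg
-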
-
-
18.8 png# TOC
-
-
PNG image encoder.
-
-
-
18.8.1 Private options# TOC
-
-
-dpi integer
-Set physical density of pixels, in dots per inch, unset by default
-
-dpm integer
-Set physical density of pixels, in dots per meter, unset by default
-
-
-
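-
As an illustrative sketch (not part of the original documentation), writing a
-PNG with a 300 dpi density tag might look like:
-
ffmpeg -i input.jpg -c:v png -dpi 300 output.png
-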
-
-
18.9 ProRes# TOC
-
-
Apple ProRes encoder.
-
-
FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
-The used encoder can be chosen with the -vcodec
option.
-
-
-
18.9.1 Private Options for prores-ks# TOC
-
-
-profile integer
-Select the ProRes profile to encode
-
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-‘4444 ’
-
-
-
-quant_mat integer
-Select quantization matrix.
-
-‘auto ’
-‘default ’
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-
-If set to auto , the matrix matching the profile will be picked.
-If not set, the matrix providing the highest quality, default , will be
-picked.
-
-
-bits_per_mb integer
-How many bits to allot for coding one macroblock. Different profiles use
-between 200 and 2400 bits per macroblock, the maximum is 8000.
-
-
-mbs_per_slice integer
-Number of macroblocks in each slice (1-8); the default value (8)
-should be good in almost all situations.
-
-
-vendor string
-Override the 4-byte vendor ID.
-A custom vendor ID like apl0 would claim the stream was produced by
-the Apple encoder.
-
-
-alpha_bits integer
-Specify number of bits for alpha component.
-Possible values are 0 , 8 and 16 .
-Use 0 to disable alpha plane coding.
-
-
-
-
-
-
18.9.2 Speed considerations# TOC
-
-
In the default mode of operation the encoder has to honor frame constraints
-(i.e. not produce frames with size bigger than requested) while still making
-output picture as good as possible.
-A frame containing a lot of small details is harder to compress and the encoder
-would spend more time searching for appropriate quantizers for each slice.
-
-
Setting a higher bits_per_mb limit will improve the speed.
-
-
For the fastest encoding speed set the qscale parameter (4 is the
-recommended value) and do not set a size constraint.
-
-
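-
As an illustrative sketch (not part of the original documentation), a fast
-prores_ks encode following the advice above might look like:
-
ffmpeg -i input.mov -c:v prores_ks -profile:v hq -qscale:v 4 output.mov
-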
-
-
19 Subtitles Encoders# TOC
-
-
-
19.1 dvdsub# TOC
-
-
This codec encodes the bitmap subtitle format that is used in DVDs.
-Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
-and they can also be used in Matroska files.
-
-
-
19.1.1 Options# TOC
-
-
-even_rows_fix
-When set to 1, enable a work-around that makes the number of pixel rows
-even in all subtitles. This fixes a problem with some players that
-cut off the bottom row if the number is odd. The work-around just adds
-a fully transparent row if needed. The overhead is low, typically
-one byte per subtitle on average.
-
-By default, this work-around is disabled.
-
-
-
-
-
20 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs
.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs
, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF
, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF
.
-
-
The option -bsfs
of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
20.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
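-
As an illustrative sketch (not part of the original documentation), copying an
-ADTS AAC stream into an MP4 container might look like:
-
ffmpeg -i input.aac -c:a copy -bsf:a aac_adtstoasc output.m4a
-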
-
-
20.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
20.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified it is assumed ‘k ’.
-
-
For example the following ffmpeg
command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264
encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
20.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg
, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
20.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
20.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
20.7 mjpega_dump_header# TOC
-
-
-
20.8 movsub# TOC
-
-
-
20.9 mp3_header_decompress# TOC
-
-
-
20.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeric string, whose value controls how often output bytes will
-be modified. Values below or equal to 0 are forbidden; the lower the
-value, the more frequently bytes will be modified, with 1 meaning
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
20.11 remove_extra# TOC
-
-
-
21 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value allows more information to be detected
-in case it is dispersed into the stream, but will increase
-latency. Must be an integer not less than 32. It is 5000000 by default.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value allows more accurate information to be detected, but will
-increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non compliancies as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitles desynching and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set number of bytes to skip before reading header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
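-
As an illustrative sketch (not part of the original documentation), several of
-the options above can be combined on the input side, for example:
-
ffmpeg -fflags +genpts+discardcorrupt -probesize 50M -analyzeduration 10M -i input.ts -c copy output.mkv
-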
-
-
21.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
-
-
22 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
22.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
-
22.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers, but the PNG signature, up to (but not including) the first
-fcTL chunk are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
-
-
22.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
22.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
22.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was at its default -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order-mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the stream with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
-
-
22.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
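-
As an illustrative sketch (not part of the original documentation), a script
-named list.ffconcat containing the placeholder file names below
-
ffconcat version 1.0
file part1.mkv
file part2.mkv
-
could be played back-to-back with:
-
ffmpeg -f concat -safe 0 -i list.ffconcat -c copy joined.mkv
-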
-
-
22.5 flv# TOC
-
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
22.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-the tracks metadata entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
-
-
22.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
22.8 gif# TOC
-
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
22.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%0Nd" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern must not necessarily contain "%d" or
-"%0N d", for example to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not provided: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
22.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
22.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
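-
-For example, assuming a placeholder input transport stream input.ts , a
-possible way to keep the original teletext PTS/DTS values untouched while
-remuxing is:
-
-
ffmpeg -fix_teletext_pts 0 -i input.ts -c copy output.ts
-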
-
-
-
22.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
22.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
22.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
23 Muxers# TOC
-
-
Muxers are configured elements in FFmpeg which allow writing
-multimedia streams to a particular type of file.
-
-
When you configure your FFmpeg build, all the supported muxers
-are enabled by default. You can list all available muxers using the
-configure option --list-muxers
.
-
-
You can disable all the muxers with the configure option
---disable-muxers
and selectively enable / disable single muxers
-with the options --enable-muxer=MUXER
/
---disable-muxer=MUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled muxers.
-
-
A description of some of the currently available muxers follows.
-
-
-
23.1 aiff# TOC
-
-
Audio Interchange File Format muxer.
-
-
-
23.1.1 Options# TOC
-
-
It accepts the following options:
-
-
-write_id3v2
-Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
-
-
-id3v2_version
-Select ID3v2 version to write. Currently only version 3 and 4 (aka.
-ID3v2.3 and ID3v2.4) are supported. The default is version 4.
-
-
-
-
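-
-For example, a possible invocation (input.wav is a placeholder input) that
-writes an AIFF file with an ID3v2.3 tag is:
-
-
ffmpeg -i input.wav -write_id3v2 1 -id3v2_version 3 output.aiff
-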
-
-
23.2 crc# TOC
-
-
CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a single line of the form:
-CRC=0xCRC , where CRC is a hexadecimal number 0-padded to
-8 digits containing the CRC for all the decoded input frames.
-
-
See also the framecrc muxer.
-
-
-
23.2.1 Examples# TOC
-
-
For example to compute the CRC of the input, and store it in the file
-out.crc :
-
-
ffmpeg -i INPUT -f crc out.crc
-
-
-
You can print the CRC to stdout with the command:
-
-
ffmpeg -i INPUT -f crc -
-
-
-
You can select the output format of each frame with ffmpeg
by
-specifying the audio and video codec and format. For example to
-compute the CRC of the input audio converted to PCM unsigned 8-bit
-and the input video converted to MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
-
-
-
-
23.3 framecrc# TOC
-
-
Per-packet CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , 0xCRC
-
-
-
CRC is a hexadecimal number 0-padded to 8 digits containing the
-CRC of the packet.
-
-
-
23.3.1 Examples# TOC
-
-
For example to compute the CRC of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.crc :
-
-
ffmpeg -i INPUT -f framecrc out.crc
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framecrc -
-
-
-
With ffmpeg
, you can select the output format to which the
-audio and video frames are encoded before computing the CRC for each
-packet by specifying the audio and video codec. For example, to
-compute the CRC of each decoded input audio frame converted to PCM
-unsigned 8-bit and of each decoded input video frame converted to
-MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
-
-
-
See also the crc muxer.
-
-
-
23.4 framemd5# TOC
-
-
Per-packet MD5 testing format.
-
-
This muxer computes and prints the MD5 hash for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , MD5
-
-
-
MD5 is a hexadecimal number representing the computed MD5 hash
-for the packet.
-
-
-
23.4.1 Examples# TOC
-
-
For example to compute the MD5 of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.md5 :
-
-
ffmpeg -i INPUT -f framemd5 out.md5
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framemd5 -
-
-
-
See also the md5 muxer.
-
-
-
23.5 gif# TOC
-
-
Animated GIF muxer.
-
-
It accepts the following options:
-
-
-loop
-Set the number of times to loop the output. Use -1
for no loop, 0
-for looping indefinitely (default).
-
-
-final_delay
-Force the delay (expressed in centiseconds) after the last frame. Each frame
-ends with a delay until the next frame. The default is -1
, which is a
-special value to tell the muxer to re-use the previous delay. In case of a
-loop, you might want to customize this value to mark a pause for instance.
-
-
-
-
For example, to encode a gif looping 10 times, with a 5 seconds delay between
-the loops:
-
-
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
-
-
-
Note 1: if you wish to extract the frames into separate GIF files, you need to
-force the image2 muxer:
-
-
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
-
-
-
Note 2: the GIF format has a very small time base: the delay between two frames
-cannot be smaller than one centisecond.
-
-
-
23.6 hls# TOC
-
-
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
-the HTTP Live Streaming (HLS) specification.
-
-
It creates a playlist file, and one or more segment files. The output filename
-specifies the playlist filename.
-
-
By default, the muxer creates a file for each segment produced. These files
-have the same name as the playlist, followed by a sequential number and a
-.ts extension.
-
-
For example, to convert an input file with ffmpeg
:
-
-
ffmpeg -i in.nut out.m3u8
-
-
This example will produce the playlist, out.m3u8 , and segment files:
-out0.ts , out1.ts , out2.ts , etc.
-
-
See also the segment muxer, which provides a more generic and
-flexible implementation of a segmenter, and can be used to perform HLS
-segmentation.
-
-
-
23.6.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-hls_time seconds
-Set the segment length in seconds. Default value is 2.
-
-
-hls_list_size size
-Set the maximum number of playlist entries. If set to 0 the list file
-will contain all the segments. Default value is 5.
-
-
-hls_ts_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing :
special characters must be
-escaped.
-
-
-hls_wrap wrap
-Set the number after which the segment filename number (the number
-specified in each segment file) wraps. If set to 0 the number will be
-never wrapped. Default value is 0.
-
-This option is useful to avoid filling the disk with many segment
-files, and limits the maximum number of segment files written to disk
-to wrap .
-
-
-start_number number
-Start the playlist sequence number from number . Default value is
-0.
-
-
-hls_allow_cache allowcache
-Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
-
-
-hls_base_url baseurl
-Append baseurl to every entry in the playlist.
-Useful to generate playlists with absolute paths.
-
-Note that the playlist sequence number must be unique for each segment
-and it is not to be confused with the segment filename sequence number
-which can be cyclic, for example if the wrap option is
-specified.
-
-
-hls_segment_filename filename
-Set the segment filename. Unless hls_flags single_file is set filename
-is used as a string format with the segment number:
-
-
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
-
-This example will produce the playlist, out.m3u8 , and segment files:
-file000.ts , file001.ts , file002.ts , etc.
-
-
-hls_flags single_file
-If this flag is set, the muxer will store all segments in a single MPEG-TS
-file, and will use byte ranges in the playlist. HLS playlists generated
-this way will have the version number 4.
-For example:
-
-
ffmpeg -i in.nut -hls_flags single_file out.m3u8
-
-Will produce the playlist, out.m3u8 , and a single segment file,
-out.ts .
-
-
-hls_flags delete_segments
-Segment files removed from the playlist are deleted after a period of time
-equal to the duration of the segment plus the duration of the playlist.
-
-
-
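-
-As a sketch combining several of the options above (in.mp4 is a placeholder
-input), a stream-copied HLS output with 4-second segments, a 10-entry playlist
-and custom segment filenames can be produced with:
-
-
ffmpeg -i in.mp4 -c copy -f hls -hls_time 4 -hls_list_size 10 -hls_segment_filename 'seg%03d.ts' out.m3u8
-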
-
-
23.7 ico# TOC
-
-
ICO file muxer.
-
-
Microsoft’s icon file format (ICO) has some strict limitations that should be noted:
-
-
- Size cannot exceed 256 pixels in any dimension
-
- Only BMP and PNG images can be stored
-
- If a BMP image is used, it must be one of the following pixel formats:
-
-
BMP Bit Depth FFmpeg Pixel Format
-1bit pal8
-4bit pal8
-8bit pal8
-16bit rgb555le
-24bit bgr24
-32bit bgra
-
-
- If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
-
- If a PNG image is used, it must use the rgba pixel format
-
-
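-
-For example, a minimal sketch (logo.png is a placeholder input) that scales an
-image to 64x64, within the size limit listed above, and writes an ICO file is:
-
-
ffmpeg -i logo.png -vf scale=64:64 favicon.ico
-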
-
-
23.8 image2# TOC
-
-
Image file muxer.
-
-
The image file muxer writes video frames to image files.
-
-
The output filenames are specified by a pattern, which can be used to
-produce sequentially numbered series of files.
-The pattern may contain the string "%d" or "%0Nd"; this string
-specifies the position of the characters representing a numbering in
-the filenames. If the form "%0Nd" is used, the string
-representing the number in each filename is 0-padded to N
-digits. The literal character ’%’ can be specified in the pattern with
-the string "%%".
-
-
-If the pattern contains "%d" or "%0Nd", the first filename of
-the file list specified will contain the number 1, all the following
-numbers will be sequential.
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the image files to write.
-
-
For example the pattern "img-%03d.bmp" will specify a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.
-The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
-form img%-1.jpg , img%-2.jpg , ..., img%-10.jpg ,
-etc.
-
-
-
23.8.1 Examples# TOC
-
-
The following example shows how to use ffmpeg
for creating a
-sequence of files img-001.jpeg , img-002.jpeg , ...,
-taking one image every second from the input video:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
-
-
-
Note that with ffmpeg
, if the format is not specified with the
--f
option and the output filename specifies an image file
-format, the image2 muxer is automatically selected, so the previous
-command can be written as:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
-
-
-
Note also that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to create a single image file
-img.jpeg from the input video you can employ the command:
-
-
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
-
-
-
The strftime option allows you to expand the filename with
-date and time information. Check the documentation of
-the strftime()
function for the syntax.
-
-
For example to generate image files from the strftime()
-"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg
command
-can be used:
-
-
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
-
-
-
-
23.8.2 Options# TOC
-
-
-start_number
-Start the sequence from the specified number. Default value is 1. Must
-be a non-negative number.
-
-
-update
-If set to 1, the filename will always be interpreted as just a
-filename, not a pattern, and the corresponding file will be continuously
-overwritten with new images. Default value is 0.
-
-
-strftime
-If set to 1, expand the filename with date and time information from
-strftime()
. Default value is 0.
-
-
-
-
The image muxer supports the .Y.U.V image file format. This format is
-special in that each image frame consists of three files, one for
-each of the YUV420P components. To read or write this image file format,
-specify the name of the ’.Y’ file. The muxer will automatically open the
-’.U’ and ’.V’ files as required.
-
-
-
23.9 matroska# TOC
-
-
Matroska container muxer.
-
-
This muxer implements the matroska and webm container specs.
-
-
-
23.9.1 Metadata# TOC
-
-
The recognized metadata settings in this muxer are:
-
-
-title
-Set title name provided to a single track.
-
-
-language
-Specify the language of the track in the Matroska languages form.
-
-The language can be either the 3 letters bibliographic ISO-639-2 (ISO
-639-2/B) form (like "fre" for French), or a language code mixed with a
-country code for specialities in languages (like "fre-ca" for Canadian
-French).
-
-
-stereo_mode
-Set stereo 3D video layout of two views in a single video track.
-
-The following values are recognized:
-
-‘mono ’
-video is not stereo
-
-‘left_right ’
-Both views are arranged side by side, Left-eye view is on the left
-
-‘bottom_top ’
-Both views are arranged in top-bottom orientation, Left-eye view is at bottom
-
-‘top_bottom ’
-Both views are arranged in top-bottom orientation, Left-eye view is on top
-
-‘checkerboard_rl ’
-Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first
-
-‘checkerboard_lr ’
-Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first
-
-‘row_interleaved_rl ’
-Each view is constituted by a row based interleaving, Right-eye view is first row
-
-‘row_interleaved_lr ’
-Each view is constituted by a row based interleaving, Left-eye view is first row
-
-‘col_interleaved_rl ’
-Both views are arranged in a column based interleaving manner, Right-eye view is first column
-
-‘col_interleaved_lr ’
-Both views are arranged in a column based interleaving manner, Left-eye view is first column
-
-‘anaglyph_cyan_red ’
-All frames are in anaglyph format viewable through red-cyan filters
-
-‘right_left ’
-Both views are arranged side by side, Right-eye view is on the left
-
-‘anaglyph_green_magenta ’
-All frames are in anaglyph format viewable through green-magenta filters
-
-‘block_lr ’
-Both eyes laced in one Block, Left-eye view is first
-
-‘block_rl ’
-Both eyes laced in one Block, Right-eye view is first
-
-
-
-
-
-
For example a 3D WebM clip can be created using the following command line:
-
-
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
-
-
-
-
23.9.2 Options# TOC
-
-
This muxer supports the following options:
-
-
-reserve_index_space
-By default, this muxer writes the index for seeking (called cues in Matroska
-terms) at the end of the file, because it cannot know in advance how much space
-to leave for the index at the beginning of the file. However for some use cases
-– e.g. streaming where seeking is possible but slow – it is useful to put the
-index at the beginning of the file.
-
-If this option is set to a non-zero value, the muxer will reserve a given amount
-of space in the file header and then try to write the cues there when the muxing
-finishes. If the available space does not suffice, muxing will fail. A safe size
-for most use cases should be about 50kB per hour of video.
-
-Note that cues are only written if the output is seekable and this option will
-have no effect if it is not.
-
-
-
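-
-For example, a sketch reserving roughly 50kB of space for the cues of about one
-hour of video while stream copying (in.mp4 is a placeholder input) is:
-
-
ffmpeg -i in.mp4 -c copy -reserve_index_space 50000 out.mkv
-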
-
-
23.10 md5# TOC
-
-
MD5 testing format.
-
-
This muxer computes and prints the MD5 hash of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a single line of the form:
-MD5=MD5 , where MD5 is a hexadecimal number representing
-the computed MD5 hash.
-
-
For example to compute the MD5 hash of the input converted to raw
-audio and video, and store it in the file out.md5 :
-
-
ffmpeg -i INPUT -f md5 out.md5
-
-
-
You can print the MD5 to stdout with the command:
-
-
ffmpeg -i INPUT -f md5 -
-
-
-
See also the framemd5 muxer.
-
-
-
23.11 mov, mp4, ismv# TOC
-
-
MOV/MP4/ISMV (Smooth Streaming) muxer.
-
-
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
-file has all the metadata about all packets stored in one location
-(written at the end of the file, it can be moved to the start for
-better playback by adding faststart to the movflags , or
-using the qt-faststart
tool). A fragmented
-file consists of a number of fragments, where packets and metadata
-about these packets are stored together. Writing a fragmented
-file has the advantage that the file is decodable even if the
-writing is interrupted (while a normal MOV/MP4 is undecodable if
-it is not properly finished), and it requires less memory when writing
-very long files (since writing normal MOV/MP4 files stores info about
-every single packet in memory until the file is closed). The downside
-is that it is less compatible with other applications.
-
-
-
23.11.1 Options# TOC
-
-
Fragmentation is enabled by setting one of the AVOptions that define
-how to cut the file into fragments:
-
-
--moov_size bytes
-Reserves space for the moov atom at the beginning of the file instead of placing the
-moov atom at the end. If the space reserved is insufficient, muxing will fail.
-
--movflags frag_keyframe
-Start a new fragment at each video keyframe.
-
--frag_duration duration
-Create fragments that are duration microseconds long.
-
--frag_size size
-Create fragments that contain up to size bytes of payload data.
-
--movflags frag_custom
-Allow the caller to manually choose when to cut fragments, by
-calling av_write_frame(ctx, NULL)
to write a fragment with
-the packets written so far. (This is only useful with other
-applications integrating libavformat, not from ffmpeg
.)
-
--min_frag_duration duration
-Don’t create fragments that are shorter than duration microseconds long.
-
-
-
-
If more than one condition is specified, fragments are cut when
-one of the specified conditions is fulfilled. The exception to this is
--min_frag_duration
, which has to be fulfilled for any of the other
-conditions to apply.
-
-
Additionally, the way the output file is written can be adjusted
-through a few other options:
-
-
--movflags empty_moov
-Write an initial moov atom directly at the start of the file, without
-describing any samples in it. Generally, an mdat/moov pair is written
-at the start of the file, as a normal MOV/MP4 file, containing only
-a short portion of the file. With this option set, there is no initial
-mdat atom, and the moov atom only describes the tracks but has
-a zero duration.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags separate_moof
-Write a separate moof (movie fragment) atom for each track. Normally,
-packets for all tracks are written in a moof atom (which is slightly
-more efficient), but with this option set, the muxer writes one moof/mdat
-pair for each track, making it easier to separate tracks.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags faststart
-Run a second pass moving the index (moov atom) to the beginning of the file.
-This operation can take a while, and will not work in various situations such
-as fragmented output, thus it is not enabled by default.
-
--movflags rtphint
-Add RTP hinting tracks to the output file.
-
--movflags disable_chpl
-Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
-and a QuickTime chapter track are written to the file. With this option
-set, only the QuickTime chapter track will be written. Nero chapters can
-cause failures when the file is reprocessed with certain tagging programs, such as
-mp3Tag 2.61a and iTunes 11.3; most likely other versions are affected as well.
-
--movflags omit_tfhd_offset
-Do not write any absolute base_data_offset in tfhd atoms. This avoids
-tying fragments to absolute byte positions in the file/streams.
-
--movflags default_base_moof
-Similarly to the omit_tfhd_offset, this flag avoids writing the
-absolute base_data_offset field in tfhd atoms, but does so by using
-the new default-base-is-moof flag instead. This flag is new from
-14496-12:2012. This may make the fragments easier to parse in certain
-circumstances (avoiding basing track fragment location calculations
-on the implicit end of the previous track fragment).
-
-
-
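-
-For example, a sketch (in.mp4 is a placeholder input) that writes a fragmented
-MP4 which remains decodable even if writing is interrupted is:
-
-
ffmpeg -i in.mp4 -c copy -movflags frag_keyframe+empty_moov fragmented.mp4
-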
-
-
23.11.2 Example# TOC
-
-
Smooth Streaming content can be pushed in real time to a publishing
-point on IIS with this muxer. Example:
-
-
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
-
-
-
-
23.12 mp3# TOC
-
-
The MP3 muxer writes a raw MP3 stream with the following optional features:
-
- An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
-2.4 are supported, the id3v2_version
private option controls which one is
-used (3 or 4). Setting id3v2_version
to 0 disables the ID3v2 header
-completely.
-
-The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
-The pictures are supplied to the muxer in form of a video stream with a single
-packet. There can be any number of those streams, each will correspond to a
-single APIC frame. The stream metadata tags title and comment map
-to APIC description and picture type respectively. See
-http://id3.org/id3v2.4.0-frames for allowed picture types.
-
-Note that the APIC frames must be written at the beginning, so the muxer will
-buffer the audio frames until it gets all the pictures. It is therefore advised
-to provide the pictures as soon as possible to avoid excessive buffering.
-
- A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
-default, but will be written only if the output is seekable. The
-write_xing
private option can be used to disable it. The frame contains
-various information that may be useful to the decoder, like the audio duration
-or encoder delay.
-
- A legacy ID3v1 tag at the end of the file (disabled by default). It may be
-enabled with the write_id3v1
private option, but as its capabilities are
-very limited, its usage is not recommended.
-
-
-
Examples:
-
-
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
-
-
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
-
-
-
To attach a picture to an mp3 file select both the audio and the picture stream
-with map
:
-
-
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
--metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
-
-
-
Write a "clean" MP3 without any extra features:
-
-
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
-
-
-
-
23.13 mpegts# TOC
-
-
MPEG transport stream muxer.
-
-
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
-
-
The recognized metadata settings in mpegts muxer are service_provider
-and service_name
. If they are not set the default for
-service_provider
is "FFmpeg" and the default for
-service_name
is "Service01".
-
-
-
23.13.1 Options# TOC
-
-
The muxer options are:
-
-
--mpegts_original_network_id number
-Set the original_network_id (default 0x0001). This is unique identifier
-of a network in DVB. Its main use is in the unique identification of a
-service through the path Original_Network_ID, Transport_Stream_ID.
-
--mpegts_transport_stream_id number
-Set the transport_stream_id (default 0x0001). This identifies a
-transponder in DVB.
-
--mpegts_service_id number
-Set the service_id (default 0x0001) also known as program in DVB.
-
--mpegts_pmt_start_pid number
-Set the first PID for PMT (default 0x1000, max 0x1f00).
-
--mpegts_start_pid number
-Set the first PID for data packets (default 0x0100, max 0x0f00).
-
--mpegts_m2ts_mode number
-Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
-
--muxrate number
-Set a constant muxrate (default VBR).
-
--pcr_period number
-Override the default PCR retransmission time (default 20ms), ignored
-if variable muxrate is selected.
-
--pes_payload_size number
-Set minimum PES packet payload in bytes.
-
--mpegts_flags flags
-Set flags (see below).
-
--mpegts_copyts number
-Preserve original timestamps, if value is set to 1. Default value is -1, which
-results in shifting timestamps so that they start from 0.
-
--tables_version number
-Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively).
-This option allows updating stream structure so that standard consumer may
-detect the change. To do so, reopen output AVFormatContext (in case of API
-usage) or restart ffmpeg instance, cyclically changing tables_version value:
-
-
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
-ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-
-
-
-
-
Option mpegts_flags may take a set of such flags:
-
-
-resend_headers
-Reemit PAT/PMT before writing the next packet.
-
-latm
-Use LATM packetization for AAC.
-
-
-
-
-
23.13.2 Example# TOC
-
-
-
ffmpeg -i file.mpg -c copy \
- -mpegts_original_network_id 0x1122 \
- -mpegts_transport_stream_id 0x3344 \
- -mpegts_service_id 0x5566 \
- -mpegts_pmt_start_pid 0x1500 \
- -mpegts_start_pid 0x150 \
- -metadata service_provider="Some provider" \
- -metadata service_name="Some Channel" \
- -y out.ts
-
-
-
-
23.14 null# TOC
-
-
Null muxer.
-
-
This muxer does not generate any output file, it is mainly useful for
-testing or benchmarking purposes.
-
-
For example to benchmark decoding with ffmpeg
you can use the
-command:
-
-
ffmpeg -benchmark -i INPUT -f null out.null
-
-
-
Note that the above command does not read or write the out.null
-file, but specifying the output file is required by the ffmpeg
-syntax.
-
-
Alternatively you can write the command as:
-
-
ffmpeg -benchmark -i INPUT -f null -
-
-
-
-
23.15 nut# TOC
-
-
--syncpoints flags
-Change the syncpoint usage in nut:
-
-default use the normal low-overhead seeking aids.
-none do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
-Use of this option is not recommended, as the resulting files are very damage
- sensitive and seeking is not possible. Also, in general the overhead from
- syncpoints is negligible. Note, -write_index
0 can be used to disable
- all growing data tables, allowing endless streams to be muxed with limited memory
- and without these disadvantages.
-
-timestamped extend the syncpoint with a wallclock field.
-
-The none and timestamped flags are experimental.
-
--write_index bool
-Write index at the end, the default is to write an index.
-
-
-
-
-
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
-
-
-
-
23.16 ogg# TOC
-
-
Ogg container muxer.
-
-
--page_duration duration
-Preferred page duration, in microseconds. The muxer will attempt to create
-pages that are approximately duration microseconds long. This allows the
-user to compromise between seek granularity and container overhead. The default
-is 1 second. A value of 0 will fill all segments, making pages as large as
-possible. A value of 1 will effectively use 1 packet-per-page in most
-situations, giving a small seek granularity at the cost of additional container
-overhead.
-
-
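-
-For example, a sketch (in.flac is a placeholder input) encoding to Ogg/Vorbis
-with pages of roughly 100 milliseconds for finer seek granularity is:
-
-
ffmpeg -i in.flac -c:a libvorbis -page_duration 100000 out.ogg
-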
-
-
-
23.17 segment, stream_segment, ssegment# TOC
-
-
Basic stream segmenter.
-
-
This muxer outputs streams to a number of separate files of nearly
-fixed duration. Output filename pattern can be set in a fashion similar to
-image2 .
-
-
stream_segment
is a variant of the muxer used to write to
-streaming output formats, i.e. which do not require global headers,
-and is recommended for outputting e.g. to MPEG transport stream segments.
-ssegment
is a shorter alias for stream_segment
.
-
-
Every segment starts with a keyframe of the selected reference stream,
-which is set through the reference_stream option.
-
-
Note that if you want accurate splitting for a video file, you need to
-make the input key frames correspond to the exact splitting times
-expected by the segmenter, or the segment muxer will start the new
-segment with the key frame found next after the specified start
-time.
-
-
The segment muxer works best with a single constant frame rate video.
-
-
Optionally it can generate a list of the created segments, by setting
-the option segment_list . The list type is specified by the
-segment_list_type option. The entry filenames in the segment
-list are set by default to the basename of the corresponding segment
-files.
-
-
See also the hls muxer, which provides a more specific
-implementation for HLS segmentation.
-
-
-
23.17.1 Options# TOC
-
-
The segment muxer supports the following options:
-
-
-reference_stream specifier
-Set the reference stream, as specified by the string specifier .
-If specifier is set to auto
, the reference is chosen
-automatically. Otherwise it must be a stream specifier (see the “Stream
-specifiers” chapter in the ffmpeg manual) which specifies the
-reference stream. The default value is auto
.
-
-
-segment_format format
-Override the inner container format, by default it is guessed by the filename
-extension.
-
-
-segment_format_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing the :
special character must be
-escaped.
-
-
-segment_list name
-Generate also a listfile named name . If not specified no
-listfile is generated.
-
-
-segment_list_flags flags
-Set flags affecting the segment list generation.
-
-It currently supports the following flags:
-
-‘cache ’
-Allow caching (only affects M3U8 list files).
-
-
-‘live ’
-Allow live-friendly file generation.
-
-
-
-
-segment_list_type type
-Select the listing format.
-
-flat use a simple flat list of entries.
-hls use a m3u8-like structure.
-
-
-
-segment_list_size size
-Update the list file so that it contains at most size
-segments. If 0 the list file will contain all the segments. Default
-value is 0.
-
-
-segment_list_entry_prefix prefix
-Prepend prefix to each entry. Useful to generate absolute paths.
-By default no prefix is applied.
-
-The following values are recognized for the segment_list_type option:
-
-‘flat ’
-Generate a flat list for the created segments, one segment per line.
-
-
-‘csv, ext ’
-Generate a list for the created segments, one segment per line,
-each line matching the format (comma-separated values):
-
-
segment_filename ,segment_start_time ,segment_end_time
-
-
-segment_filename is the name of the output file generated by the
-muxer according to the provided pattern. CSV escaping (according to
-RFC4180) is applied if required.
-
-segment_start_time and segment_end_time specify
-the segment start and end time expressed in seconds.
-
-A list file with the suffix ".csv"
or ".ext"
will
-auto-select this format.
-
-‘ext ’ is deprecated in favor of ‘csv ’.
-
-
-‘ffconcat ’
-Generate an ffconcat file for the created segments. The resulting file
-can be read using the FFmpeg concat demuxer.
-
-A list file with the suffix ".ffcat"
or ".ffconcat"
will
-auto-select this format.
-
-
-‘m3u8 ’
-Generate an extended M3U8 file, version 3, compliant with
-http://tools.ietf.org/id/draft-pantos-http-live-streaming .
-
-A list file with the suffix ".m3u8"
will auto-select this format.
-
-
-
-If not specified the type is guessed from the list file name suffix.
-
-
-segment_time time
-Set segment duration to time , the value must be a duration
-specification. Default value is "2". See also the
-segment_times option.
-
-Note that splitting may not be accurate, unless you force the
-reference stream key-frames at the given time. See the introductory
-notice and the examples below.
-
-
-segment_atclocktime 1|0
-If set to "1" split at regular clock time intervals starting from 00:00
-o’clock. The time value specified in segment_time is
-used for setting the length of the splitting interval.
-
-For example with segment_time set to "900" this makes it possible
-to create files at 12:00 o’clock, 12:15, 12:30, etc.
-
-Default value is "0".
-
-
-segment_time_delta delta
-Specify the accuracy time when selecting the start time for a
-segment, expressed as a duration specification. Default value is "0".
-
-When delta is specified a key-frame will start a new segment if its
-PTS satisfies the relation:
-
-
PTS >= start_time - time_delta
-
-
-This option is useful when splitting video content, which is always
-split at GOP boundaries, in case a key frame is found just before the
-specified split time.
-
-In particular it may be used in combination with the ffmpeg option
-force_key_frames . The key frame times specified by
-force_key_frames may not be set accurately because of rounding
-issues, with the consequence that a key frame time may end up set just
-before the specified time. For constant frame rate videos a value of
-1/(2*frame_rate ) should address the worst case mismatch between
-the specified time and the time set by force_key_frames .
-
-
-segment_times times
-Specify a list of split points. times contains a list of comma
-separated duration specifications, in increasing order. See also
-the segment_time option.
-
-
-segment_frames frames
-Specify a list of split video frame numbers. frames contains a
-list of comma separated integer numbers, in increasing order.
-
-This option specifies to start a new segment whenever a reference
-stream key frame is found and the sequential number (starting from 0)
-of the frame is greater or equal to the next value in the list.
-
-
-segment_wrap limit
-Wrap around segment index once it reaches limit .
-
-
-segment_start_number number
-Set the sequence number of the first segment. Defaults to 0
.
-
-
-reset_timestamps 1|0
-Reset timestamps at the beginning of each segment, so that each segment
-will start with near-zero timestamps. It is meant to ease the playback
-of the generated segments. May not work with some combinations of
-muxers/codecs. It is set to 0
by default.
-
-
-initial_offset offset
-Specify timestamp offset to apply to the output packet timestamps. The
-argument must be a time duration specification, and defaults to 0.
-
-
-
-
-
23.17.2 Examples# TOC
-
-
-
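-
-For example, a sketch (in.mkv is a placeholder input) that remuxes the input
-into segments of the default duration and also writes a flat list file is:
-
-
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list 'out%03d.nut'
-
-
-Similarly, a sketch splitting at the explicit times given by
-segment_times is:
-
-
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_times 1,2,3,5,8,13,21 'out%03d.nut'
-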
-
-
23.18 smoothstreaming# TOC
-
-
The Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with a conventional web server.
-
-
-window_size
-Specify the number of fragments kept in the manifest. Default 0 (keep all).
-
-
-extra_window_size
-Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
-
-
-lookahead_count
-Specify the number of lookahead fragments. Default 2.
-
-
-min_frag_duration
-Specify the minimum fragment duration (in microseconds). Default 5000000.
-
-
-remove_at_exit
-Specify whether to remove all fragments when finished. Default 0 (do not remove).
-
-
-
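-
-For example, a sketch (in.mp4 is a placeholder input, output_dir a placeholder
-directory that the muxer fills with the Manifest and chunk files) is:
-
-
ffmpeg -re -i in.mp4 -c copy -f smoothstreaming -window_size 10 output_dir
-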
-
-
-
23.19 tee# TOC
-
-
The tee muxer can be used to write the same data to several files or any
-other kind of muxer. It can be used, for example, to both stream a video to
-the network and save it to disk at the same time.
-
-
It is different from specifying several outputs to the ffmpeg
-command-line tool because the audio and video data will be encoded only once
-with the tee muxer; encoding can be a very expensive process. It is not
-useful when using the libavformat API directly because it is then possible
-to feed the same packets to several muxers directly.
-
-
The slave outputs are specified in the file name given to the muxer,
-separated by ’|’. If any of the slave names contains the ’|’ separator,
-leading or trailing spaces or any special character, it must be
-escaped (see (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual ).
-
-
Muxer options can be specified for each slave by prepending them as a list of
-key =value pairs separated by ’:’, between square brackets. If
-the options values contain a special character or the ’:’ separator, they
-must be escaped; note that this is a second level escaping.
-
-
The following special options are also recognized:
-
-f
-Specify the format name. Useful if it cannot be guessed from the
-output name suffix.
-
-
-bsfs[/spec ]
-Specify a list of bitstream filters to apply to the specified
-output.
-
-It is possible to specify to which streams a given bitstream filter
-applies, by appending a stream specifier to the option separated by
-/
. spec must be a stream specifier (see Format stream specifiers ). If the stream specifier is not specified, the
-bitstream filters will be applied to all streams in the output.
-
-Several bitstream filters can be specified, separated by ",".
-
-
-select
-Select the streams that should be mapped to the slave output,
-specified by a stream specifier. If not specified, this defaults to
-all the input streams.
-
-
-
-
-
23.19.1 Examples# TOC
-
-
- Encode something and both archive it in a WebM file and stream it
-as MPEG-TS over UDP (the streams need to be explicitly mapped):
-
-
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
- "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
-
-
- Use ffmpeg
to encode the input, and send the output
-to three different destinations. The dump_extra
bitstream
-filter is used to add extradata information to all the output video
-keyframes packets, as requested by the MPEG-TS format. The select
-option is applied to out.aac in order to make it contain only
-audio packets.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
-
-
- As below, but select only stream a:1
for the audio output. Note
-that a second level escaping must be performed, as ":" is a special
-character used to separate options.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
-
-
-
-
Note: some codecs may need different options depending on the output format;
-the auto-detection of this can not work with the tee muxer. The main example
-is the global_header flag.
-
-
-
23.20 webm_dash_manifest# TOC
-
-
WebM DASH Manifest muxer.
-
-
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
-
-
-
23.20.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-adaptation_sets
-This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
-unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
-audio and video streams. Any number of adaptation sets can be added using this option.
-
-
-
-
-
23.20.2 Example# TOC
-
-
ffmpeg -f webm_dash_manifest -i video1.webm \
- -f webm_dash_manifest -i video2.webm \
- -f webm_dash_manifest -i audio1.webm \
- -f webm_dash_manifest -i audio2.webm \
- -map 0 -map 1 -map 2 -map 3 \
- -c copy \
- -f webm_dash_manifest \
- -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
- manifest.xml
-
-
-
-
24 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
An ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
25 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "–list-protocols".
-
-
You can disable all the protocols using the configure option
-"–disable-protocols", and selectively enable a protocol using the
-option "–enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"–disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
25.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
25.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to a temporary file. It brings seeking capability to live streams.
-
-
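-
-For example, a sketch (the URL is a placeholder) that plays a remote stream
-through the cache wrapper, so that backward seeks can be served from the
-temporary file, is:
-
-
ffplay cache:http://example.com/video.mp4
-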
-
-
-
25.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
Read and seek from many resources in sequence as if they were
-a single resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
where URL1 , URL2 , ..., URLN are the URLs of the
-resource to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
25.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
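-
-For example, a sketch (the key, IV and filenames are placeholder values) that
-decrypts a local AES-encrypted file is:
-
-
ffmpeg -key 00112233445566778899aabbccddeeff -iv 00112233445566778899aabbccddeeff -i crypto:encrypted.mp3 decrypted.wav
-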
-
-
25.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
25.6 file# TOC
-
-
File access protocol.
-
-
Read from or write to a file.
-
-
A file URL can have the form:
-
file:filename
-
where filename is the path of the file to read.
-
-
A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow media.
-
-
-
-
-
25.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
Read from or write to remote resources using the FTP protocol.
-
-
The following syntax is required:
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
-
-
25.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
25.9 hls# TOC
-
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
25.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
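-
-For example, a sketch (the URL and header values are placeholders) that
-overrides the User-Agent and adds a custom header when reading an HTTP
-resource is:
-
-
ffplay -user_agent "MyPlayer/1.0" -headers "Referer: http://example.com/" http://example.com/video.mp4
-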
-
-
25.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
25.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, which do not support the
-HTTP PUT method but only the SOURCE method.
-
-
-
-
-
-
icecast://[username [:password ]@]server :port /mountpoint
-
-
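-
-For example, a sketch (the credentials, host and mountpoint are placeholders)
-that streams an MP3 file to an Icecast server is:
-
-
ffmpeg -re -i input.mp3 -c:a copy -content_type audio/mpeg -ice_name "Test stream" icecast://source:hackme@localhost:8000/stream.mp3
-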
-
-
25.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
25.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server [:port ][/app ][/playpath ]
-
-
-
-
25.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
25.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
Read from and write to UNIX pipes.
-
-
The accepted syntax is:
-
pipe:[number ]
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example to read from stdin with ffmpeg
:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg
:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV), require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
25.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any
, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live
and
-recorded
.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example to read with ffplay
a multimedia resource named
-"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
25.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
25.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
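Usage mirrors the plain rtmp protocol. For example, assuming the same
hypothetical server, application and resource names as in the rtmp example
above:

ffplay rtmps://myserver/vod/sample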
-
-
25.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
25.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
25.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
25.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
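For example, to play a file from a hypothetical SMB share (server, share, path
and credentials below are placeholders):

ffplay smb://guest:secret@fileserver/media/video.mkv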
-
For more information see: http://www.samba.org/ .
-
-
-
25.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
Allows reading from or writing to remote resources using the SFTP protocol.
-
-
The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing the private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
25.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"--enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto ://server [:port ][/app ][/playpath ] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using
-ffmpeg
:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay
:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
25.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname [:port ][?option =val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
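As an illustration, a sketch of sending a single encoded audio stream to a
remote peer, using the rtcpport option described above; the host name and
ports are placeholders, and -vn is used because the RTP muxer carries only
one stream per output:

ffmpeg -re -i input -vn -ac 1 -ar 8000 -acodec pcm_mulaw -f rtp rtp://remote-host:5004?rtcpport=5005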
-
-
25.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat, it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for an RTSP URL is:
-
-
rtsp://hostname [:port ]/path
-
-
-
Options can be set on the ffmpeg
/ffplay
command
-line, or set in code via AVOption
s or in
-avformat_open_input
.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing through proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay
, the
-streams to display can be chosen with -vst
n and
--ast
n for video and audio respectively, and can be switched
-on the fly by pressing v
and a
.
-
-
-
25.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay
and
-ffmpeg
tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
25.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat, it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
25.27.1 Muxer# TOC
-
-
The syntax for a SAP URL given to the muxer is:
-
-
sap://destination [:port ][?options ]
-
-
-
The RTP packets are sent to destination on port port ,
-or to port 5004 if no port is specified.
-options is a &-separated list. The following options
-are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay
:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay
, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
25.27.2 Demuxer# TOC
-
-
The syntax for a SAP URL given to the demuxer is:
-
-
sap://[address ][:port ]
-
-
-
address is the multicast address to listen for announcements on;
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port to listen on, 9875 if omitted.
-
-
The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
ffplay sap://
-
To play back the first stream announced on one of the default IPv6 SAP multicast addresses:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
25.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host :port [?options ]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. Outgoing connection is done by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
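For example, a sketch of sending an MPEG-TS stream over SCTP to a listening
receiver; the host and port are placeholders, and the receiver is started
first so the sender has something to connect to:

ffplay -f mpegts sctp://localhost:4444?listen=1
ffmpeg -re -i input -f mpegts sctp://localhost:4444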
-
-
-
25.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
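For illustration, a sketch of sending an SRTP-protected MPEG-TS stream. The
key material shown is a placeholder for a base64-encoded 30-byte block (the
16-byte master key followed by the 14-byte master salt), and the host and
port are hypothetical:

ffmpeg -re -i input -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params <base64 key and salt> srtp://remote-host:5006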
-
-
-
25.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
25.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP URL is:
-
-
tcp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set the timeout for raising an error, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives in more
-than this time interval, an error is raised.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to set up a listening TCP connection
-with ffmpeg
, which is then accessed with ffplay
:
-
-
ffmpeg -i input -f format tcp://hostname :port ?listen
-ffplay tcp://hostname :port
-
-
-
-
25.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL URL is:
-
-
tls://hostname :port [?options ]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay
:
-
-
-
ffplay tls://hostname :port
-
-
-
-
25.33 udp# TOC
-
-
User Datagram Protocol.
-
-
The required syntax for a UDP URL is:
-
-
udp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set the timeout for raising an error, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives in more
-than this time interval, an error is raised.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
25.33.1 Examples# TOC
-
-
- Use ffmpeg
to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname :port
-
-
- Use ffmpeg
to stream in mpegts format over UDP using 188
-sized UDP packets, using a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg
to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address ]:port ...
-
-
-
-
-
25.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
-
unix://filepath
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
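A possible usage sketch, assuming the hypothetical socket path
/tmp/ffmpeg.sock and an MPEG-TS payload; listen is the protocol option
described above, set here as a command line option, and the listening writer
is started before the reader:

ffmpeg -i INPUT -listen 1 -f mpegts unix:///tmp/ffmpeg.sock
ffplay -f mpegts unix:///tmp/ffmpeg.sock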
-
-
-
26 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
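For instance, with the video4linux2 input device described below, the private
options framerate and video_size are set like any other command line option:

ffmpeg -f video4linux2 -framerate 25 -video_size vga -i /dev/video0 out.mpg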
-
-
-
27 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow you to access
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-
You can disable all the input devices using the configure option
-"--disable-indevs", and selectively enable an input device using the
-option "--enable-indev=INDEV ", or you can disable a particular
-input device using the option "--disable-indev=INDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
27.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD [,DEV [,SUBDEV ]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg
from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
27.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the currently recommended framework by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the latter selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-
- -video_device_index <INDEX>
-
-and/or
-
- -audio_device_index <INDEX>
-
-, overriding any
-device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
27.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
27.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
27.3 bktr# TOC
-
-
BSD video input device.
-
-
-
27.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronization between them.
-
-
The input name should be in the format:
-
-
TYPE=NAME[:TYPE=NAME]
-
where TYPE can be either audio or video ,
-and NAME is the device’s name.
-
-
-
27.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
27.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
-
-
-
-
27.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
27.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
27.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
desktop
or
title=window_title
-
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
27.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0
to
-not draw the pointer. Default value is 1
.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
27.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
27.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
27.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
-
-
-
-
-
-
27.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
27.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
27.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
27.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
27.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
27.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
27.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
27.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
27.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string '' as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
27.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg
use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
27.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default"
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
27.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the samplerate in Hz, by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
27.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
-
-
-
27.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
27.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg
use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
27.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node. Usually Linux
-systems tend to automatically create such nodes when the device
-(e.g. a USB webcam) is plugged into the system, and has a name of the
-kind /dev/videoN , where N is a number associated to
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-width xheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
27.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Defaults to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
27.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
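For example, a sketch capturing from capture driver number 0 at 25 frames per
second (the output file name is arbitrary):

ffmpeg -f vfwcap -framerate 25 -i 0 out.avi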
-
27.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname :display_number .screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo
program for getting basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
27.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0
specifies
-not to draw the pointer. Default value is 1
.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered
or a number of pixels PIXELS .
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of region; otherwise, the region
-follows only when the mouse pointer reaches within PIXELS (greater than
-zero) to the edge of region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer reaches within 100 pixels to edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga
.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1
.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
27.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz and the number
-of channels currently is limited to 2 (stereo).
-
-
-
27.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
27.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
28 Output Devices# TOC
-
-
Output devices are configured elements in FFmpeg that can write
-multimedia data to an output device attached to your system.
-
-
When you configure your FFmpeg build, all the supported output devices
-are enabled by default. You can list all available ones using the
-configure option "--list-outdevs".
-
-
You can disable all the output devices using the configure option
-"--disable-outdevs", and selectively enable an output device using the
-option "--enable-outdev=OUTDEV ", or you can disable a particular
-output device using the option "--disable-outdev=OUTDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-enabled output devices.
-
-
A description of the currently available output devices follows.
-
-
-
28.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) output device.
-
-
-
28.1.1 Examples# TOC
-
-
- Play a file on default ALSA device:
-
-
ffmpeg -i INPUT -f alsa default
-
-
- Play a file on soundcard 1, audio device 7:
-
-
ffmpeg -i INPUT -f alsa hw:1,7
-
-
-
-
-
28.2 caca# TOC
-
-
CACA output device.
-
-
This output device allows one to show a video stream in a CACA window.
-Only one CACA window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need to configure FFmpeg with
---enable-libcaca
.
-libcaca is a graphics library that outputs text instead of pixels.
-
-
For more information about libcaca, check:
-http://caca.zoy.org/wiki/libcaca
-
-
-
28.2.1 Options# TOC
-
-
-window_title
-Set the CACA window title, if not specified default to the filename
-specified for the output device.
-
-
-window_size
-Set the CACA window size, can be a string of the form
-width xheight or a video size abbreviation.
-If not specified it defaults to the size of the input video.
-
-
-driver
-Set display driver.
-
-
-algorithm
-Set dithering algorithm. Dithering is necessary
-because the picture being rendered has usually far more colours than
-the available palette.
-The accepted values are listed with -list_dither algorithms
.
-
-
-antialias
-Set the antialias method. Antialiasing smooths the rendered
-image and avoids the commonly seen staircase effect.
-The accepted values are listed with -list_dither antialiases
.
-
-
-charset
-Set which characters are going to be used when rendering text.
-The accepted values are listed with -list_dither charsets
.
-
-
-color
-Set color to be used when rendering text.
-The accepted values are listed with -list_dither colors
.
-
-
-list_drivers
-If set to true , print a list of available drivers and exit.
-
-
-list_dither
-List available dither options related to the argument.
-The argument must be one of algorithms
, antialiases
,
-charsets
, colors
.
-
-
-
-
-
28.2.2 Examples# TOC
-
-
- The following command shows the ffmpeg output in a
-CACA window, forcing its size to 80x25:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
-
-
- Show the list of available drivers and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
-
-
- Show the list of available dither colors and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
-
-
-
-
-
28.3 decklink# TOC
-
-
The decklink output device provides playback capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this output device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz.
-
-
-
28.3.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-preroll
-Amount of time to preroll video in seconds.
-Defaults to 0.5 .
-
-
-
-
-
-
28.3.2 Examples# TOC
-
-
- List output devices:
-
-
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
-
-
- List supported formats:
-
-
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
-
-
- Play video clip:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
-
-
- Play video clip with non-standard framerate or video size:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
-
-
-
-
-
-
28.4 fbdev# TOC
-
-
Linux framebuffer output device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
-
28.4.1 Options# TOC
-
-xoffset
-yoffset
-Set x/y coordinate of top left corner. Default is 0.
-
-
-
-
-
28.4.2 Examples# TOC
-
Play a file on framebuffer device /dev/fb0 .
-Required pixel format depends on current framebuffer settings.
-
-
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
-
-
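-As an illustrative sketch (the offset values below are arbitrary), the
-xoffset and yoffset options documented above can be combined with the same
-command to place the video 100 pixels from the top left corner of the framebuffer:
-
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev -xoffset 100 -yoffset 100 /dev/fb0
-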
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
28.5 opengl# TOC
-
OpenGL output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-opengl.
-
-
This output device allows one to render to an OpenGL context.
-The context may be provided by the application, or a default SDL window is created.
-
-
-When the device renders to an external context, the application must implement handlers for the following messages:
-AV_DEV_TO_APP_CREATE_WINDOW_BUFFER - create an OpenGL context on the current thread.
-AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER - make the OpenGL context current.
-AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER - swap buffers.
-AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER - destroy the OpenGL context.
-The application is also required to inform the device about the current resolution by sending the AV_APP_TO_DEV_WINDOW_SIZE message.
-
-
-
28.5.1 Options# TOC
-
-background
-Set background color. Black is a default.
-
-no_window
-Disables default SDL window when set to non-zero value.
-Application must provide OpenGL context and both window_size_cb and window_swap_buffers_cb callbacks when set.
-
-window_title
-Set the SDL window title; if not specified it defaults to the filename specified for the output device.
-Ignored when no_window is set.
-
-window_size
-Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
-If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
-Mostly usable when no_window is not set.
-
-
-
-
-
-
28.5.2 Examples# TOC
-
Play a file on SDL window using OpenGL rendering:
-
-
ffmpeg -i INPUT -f opengl "window title"
-
-
-
-
28.6 oss# TOC
-
-
OSS (Open Sound System) output device.
-
-
-
28.7 pulse# TOC
-
-
PulseAudio output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org
-
-
-
28.7.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT string.
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is set to the specified output name.
-
-
-device
-Specify the device to use. Default device is used when not provided.
-List of output devices can be obtained with command pactl list sinks.
-
-
-buffer_size
-buffer_duration
-Control the size and duration of the PulseAudio buffer. A small buffer
-gives more control, but requires more frequent updates.
-
-buffer_size specifies size in bytes while
-buffer_duration specifies duration in milliseconds.
-
-When both options are provided, the highest value is used
-(duration is recalculated to bytes using stream parameters). If they
-are set to 0 (which is the default), the device will use the default
-PulseAudio duration value. By default PulseAudio sets the buffer duration
-to around 2 seconds.
-
-
-prebuf
-Specify pre-buffering size in bytes. The server does not start with
-playback before at least prebuf bytes are available in the
-buffer. By default this option is initialized to the same value as
-buffer_size or buffer_duration (whichever is bigger).
-
-
-minreq
-Specify minimum request size in bytes. The server does not request less
-than minreq bytes from the client, instead waits until the buffer
-is free enough to request more bytes at once. It is recommended to not set
-this option, which will initialize this to a value that is deemed sensible
-by the server.
-
-
-
-
-
-
28.7.2 Examples# TOC
-
Play a file on default device on default server:
-
-
ffmpeg -i INPUT -f pulse "stream name"
-
-
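-As a further sketch, a specific server and sink can be selected; the device
-name below is only a placeholder and should be taken from pactl list sinks:
-
ffmpeg -i INPUT -f pulse -device alsa_output.pci-0000_00_1b.0.analog-stereo "stream name"
-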
-
-
28.8 sdl# TOC
-
-
SDL (Simple DirectMedia Layer) output device.
-
-
This output device allows one to show a video stream in an SDL
-window. Only one SDL window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need libsdl installed on your system
-when configuring your build.
-
-
For more information about SDL, check:
-http://www.libsdl.org/
-
-
-
28.8.1 Options# TOC
-
-
-window_title
-Set the SDL window title; if not specified it defaults to the filename
-specified for the output device.
-
-
-icon_title
-Set the name of the iconified SDL window, if not specified it is set
-to the same value of window_title .
-
-
-window_size
-Set the SDL window size, which can be a string of the form
-widthxheight or a video size abbreviation.
-If not specified it defaults to the size of the input video,
-downscaled according to the aspect ratio.
-
-
-window_fullscreen
-Set fullscreen mode when non-zero value is provided.
-Default value is zero.
-
-
-
-
-
28.8.2 Interactive commands# TOC
-
-
The window created by the device can be controlled through the
-following interactive commands.
-
-
-q, ESC
-Quit the device immediately.
-
-
-
-
-
28.8.3 Examples# TOC
-
-
The following command shows the ffmpeg output in an
-SDL window, forcing its size to the qcif format:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
-
-
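-A variation of the above (shown only as a sketch), enabling fullscreen mode
-through the window_fullscreen option:
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_fullscreen 1 -f sdl "SDL output"
-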
-
-
28.9 sndio# TOC
-
-
sndio audio output device.
-
-
-
28.10 xv# TOC
-
-
XV (XVideo) output device.
-
-
This output device allows one to show a video stream in a X Window System
-window.
-
-
-
28.10.1 Options# TOC
-
-
-display_name
-Specify the hardware display name, which determines the display and
-communications domain to be used.
-
-The display name or DISPLAY environment variable can be a string in
-the format hostname[:number[.screen_number]].
-
-hostname specifies the name of the host machine on which the
-display is physically attached. number specifies the number of
-the display server on that host machine. screen_number specifies
-the screen to be used on that server.
-
-If unspecified, it defaults to the value of the DISPLAY environment
-variable.
-
-For example, dual-headed:0.1 would specify screen 1 of display
-0 on the machine named “dual-headed”.
-
-Check the X11 specification for more detailed information about the
-display name format.
-
-
-window_id
-When set to a non-zero value the device doesn’t create a new window,
-but uses the existing one with the provided window_id. By default
-this option is set to zero and the device creates its own window.
-
-
-window_size
-Set the created window size, which can be a string of the form
-widthxheight or a video size abbreviation. If not
-specified it defaults to the size of the input video.
-Ignored when window_id is set.
-
-
-window_x
-window_y
-Set the X and Y window offsets for the created window. They are both
-set to 0 by default. The values may be ignored by the window manager.
-Ignored when window_id is set.
-
-
-window_title
-Set the window title; if not specified it defaults to the filename
-specified for the output device. Ignored when window_id is set.
-
-
-
-
For more information about XVideo see http://www.x.org/ .
-
-
-
28.10.2 Examples# TOC
-
-
- Decode, display and encode video input with ffmpeg at the
-same time:
-
-
ffmpeg -i INPUT OUTPUT -f xv display
-
-
- Decode and display the input video to multiple X11 windows:
-
-
ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
-
-
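- As a further sketch (sizes and offsets chosen arbitrarily), decode and
-display the input in a 640x480 window placed at offset (100,50):
-
ffmpeg -i INPUT -f xv -window_size 640x480 -window_x 100 -window_y 50 display
-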
-
-
-
29 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option=value for the aresample filter,
-by setting the value explicitly in the
-SwrContext options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
-
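-As a minimal sketch of how these options are typically combined (the values
-below are only illustrative), the following resamples to 48 kHz with
-triangular high-pass dithering via the aresample filter:
-
ffmpeg -i INPUT -af aresample=out_sample_rate=48000:dither_method=triangular_hp OUTPUT
-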
-
-
30 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific to
-some scaling algorithms and ignored by others. They
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
-
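-As a minimal sketch (the target size and flag choice are only illustrative),
-these flags are typically passed together with the scale filter:
-
ffmpeg -i INPUT -vf scale=1280:720 -sws_flags lanczos+accurate_rnd OUTPUT
-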
-
-
-
31 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half of the video; the result is then vertically flipped. The
-overlay filter takes as input the first unchanged output of the
-split filter (which was labelled as [main] ), and overlays on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take as input a list of parameters: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
32 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
-Invoke the command:
-
-
graph2dot -h
-
-to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
33 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
33.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter/-vf and -filter_complex
-options in ffmpeg and -vf in ffplay, and by the
-avfilter_graph_parse()/avfilter_graph_parse2() functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value. In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade filter
-declares three options in this order – type, start_frame and
-nb_frames. Then the parameter list in:0:30 means that the value
-in is assigned to the option type, 0 to
-start_frame and 30 to nb_frames (see the equivalent
-forms shown after this list).
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct value must precede the key=value pairs, and
-follow the same order constraints as in the previous point. The following
-key=value pairs can be set in any preferred order.
-
-
-
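-For instance, given the fade options mentioned above, the following three
-argument forms are equivalent ways of writing the same filter instance:
-
fade=in:0:30
fade=type=in:start_frame=0:nb_frames=30
fade=in:0:nb_frames=30
-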
-
If the option value itself is a list of items (e.g. the format filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
33.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character : used to
-separate values, or one of the escaping characters \'.
-
-
-A second level escaping affects the whole filter description, which
-may contain the escaping characters \' or the special
-characters [],; used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the ' special escaping character, and the
-: special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \' escaping special characters,
-also , needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\ is special and needs to be escaped with another \, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
34 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
35 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
35.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channel are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If the number of given delays is
-smaller than the number of channels, all remaining channels will not be delayed.
-
-
-
-
-
35.1.1 Examples# TOC
-
-
- Delay the first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged:
-
adelay=1500|0|500
-
-
-
35.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay, and the
-loudness of the reflected signal is the decay.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay is (0 - 90000.0].
-Default is 1000.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay is (0 - 1.0].
-Default is 0.5.
-
-
-
-
-
35.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
-
- If the delay is very short, then it sounds like a (metallic) robot playing music:
-
-
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
35.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-by default use the same channel layout as the input.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
35.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
aeval=val(0)|-val(1)
-
-
-
-
35.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in for fade-in, or
-out for a fade-out effect. Default is in.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
35.4.1 Examples# TOC
-
-
- Fade in the first 15 seconds of audio:
-
afade=t=in:ss=0:d=15
-
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
35.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
35.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
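-For example (frequency and width chosen only for illustration), the following
-applies an all-pass filter centered at 700 Hz with a 100 Hz band-width:
-
allpass=f=700:width_type=h:width=100
-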
-
-
35.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
On the other hand, if both inputs are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
35.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
35.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
35.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
35.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
This can be used together with ffmpeg -shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
35.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
apad=pad_len=1024
-
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
apad=whole_len=10000
-
-
- Use ffmpeg to pad the audio input with silence, so that the
-video stream will always be the shortest and will be fully converted
-in the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
35.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
A phaser filter creates series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74.
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
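-A minimal sketch (the parameter values are illustrative, not recommendations),
-using a triangular modulation at 2 Hz:
-
aphaser=type=t:speed=2:decay=0.6
-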
-
-
35.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
35.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
aresample=44100
-
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
aresample=async=1000
-
-
-
-
-
35.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel.
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
35.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
-
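-For example, retagging 44.1 kHz input as 22050 Hz roughly halves the speed
-and pitch (a sketch; the exact factor depends on the input sample rate):
-
asetrate=22050
-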
-
-
35.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
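-A typical way to inspect a file without writing any output is to combine the
-filter with the null muxer (shown here only as a sketch):
-
ffmpeg -i INPUT -af ashowinfo -f null -
-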
-
35.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10].
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
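-A sketch of how the statistics are usually obtained without writing an output
-file, again using the null muxer:
-
ffmpeg -i INPUT -af astats -f null -
-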
-
-
35.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
35.17.1 Examples# TOC
-
-
Stress-test amerge by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
35.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
35.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
35.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
atempo=0.8
-
- Speed up audio to 125% tempo:
-
atempo=1.25
-
-
-
-
-
-
35.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
35.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
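-For example (values chosen only for illustration), keep a band 200 Hz wide
-around 1000 Hz:
-
bandpass=f=1000:width_type=h:width=200
-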
-
-
35.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
35.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
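-For example (the gain is only an illustrative value), boost the bass by 6 dB
-at the default 100 Hz centre frequency:
-
bass=g=6
-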
-
-
35.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
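-A minimal sketch (the coefficients below are arbitrary illustration values,
-not a designed filter):
-
biquad=b0=0.4:b1=0.2:b2=0.4:a0=1:a1=-0.6:a2=0.2
-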
-
-
35.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
-
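-For example, apply the Chu Moy preset, or set the equivalent parameters
-explicitly (the second form simply restates that preset):
-
bs2b=profile=cmoy
bs2b=fcut=700:feed=60
-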
-
-
35.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel-out_channel or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
35.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
35.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|.... or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0 is assumed but
-may be overridden (by 0/out-dBn). Typical values for the transfer
-function are -70/-70|-60/-20.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
35.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
35.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
35.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
35.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
35.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage shift for multi-channel output. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
-
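-A hypothetical invocation, with values picked purely for illustration from the
-ranges described above:
-
flanger=delay=10:depth=5:regen=20:speed=2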
-
-
35.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
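-As an illustrative sketch, a single-pole filter cutting content below roughly
-200 Hz (values chosen arbitrarily) could be:
-
highpass=f=200:p=1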
-
-
35.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx.in_channel-out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
35.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa
.
-
-
-file, f
-Specifies the name of the LADSPA plugin library to load. If the environment
-variable LADSPA_PATH is defined, the LADSPA plugin is searched for in
-each of the directories specified by the colon-separated list in
-LADSPA_PATH, otherwise in the standard LADSPA paths, which are, in
-this order: HOME/.ladspa/lib/, /usr/local/lib/ladspa/, /usr/lib/ladspa/.
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help
, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate; the default is 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame; the default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
35.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
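-Presumably by naming the library without selecting a plugin:
-
ladspa=file=amp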
-
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF
library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome
from the
-C* Audio Plugin Suite
(CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
35.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and the prior one is kept.
-
-
-
-
-
35.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
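-An illustrative invocation keeping only content below about 1 kHz (the value is
-arbitrary) might be:
-
lowpass=f=1000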
-
-
35.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
35.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
Note that ffmpeg
integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
35.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- there is only one input per output channel,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
35.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At the end of filtering it displays track_gain and track_peak.
-
-
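-A plausible way to scan a file without writing any audio (input.flac is only a
-placeholder name) would be:
-
ffmpeg -i input.flac -af replaygain -f null -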
-
35.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
35.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
-This filter logs a message when it detects that the input audio volume is less
-than or equal to a noise tolerance value for a duration greater than or equal
-to the minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
35.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg
to detect silence with 0.0001 noise
-tolerance in silence.mp3 :
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
35.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from beginning of audio
-the start_periods will be 1
but it can be increased to higher
-values to trim all audio up to specific count of non-silence periods.
-Default value is 0
.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0
.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0
may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0
.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0
.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0
.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0
.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-This is useful, for example, if you want to shorten long pauses between words
-without removing them completely. Default value is 0.
-
-
-
-
-
-
35.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
-
-
35.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000
Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
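-An illustrative sketch boosting the region above 4 kHz by 5 dB (values chosen
-arbitrarily):
-
treble=g=5:f=4000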
-
-
35.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
35.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
35.42.2 Examples# TOC
-
-
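-For instance, halve the input volume, or apply a fixed 6 dB boost using
-fixed-point precision (both are illustrative uses of the options above):
-
volume=volume=0.5
volume=volume=6dB:precision=fixed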
-
-
-
35.43 volumedetect# TOC
-
-
Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the end of the input stream is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
35.43.1 Examples# TOC
-
-
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
36 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
36.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
36.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
36.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
36.2.1 Examples# TOC
-
-
- Generate silence:
-
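-Presumably with a constant zero expression:
-
aevalsrc=0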
-
- Generate a sine signal with a frequency of 440 Hz, and set the sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
-
-
-
-
36.3 anullsrc# TOC
-
-
The null audio source returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
36.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
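-Assuming 4 is the integer value of the AV_CH_LAYOUT_MONO macro:
-
anullsrc=r=48000:cl=4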
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
-
-
36.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite
.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal
. See also the list_voices option.
-
-
-
-
-
36.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt
voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite
and
-the lavfi
device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
36.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
36.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
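-Presumably just the bare filter with its defaults:
-
sine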
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
37 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
37.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
37.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
38 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
38.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
38.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
38.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto.
-
-
-
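-A hypothetical use, where subs.ass is only a placeholder filename:
-
ass=filename=subs.ass:shaping=complex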
-
-
38.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16.
-
-
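-An illustrative invocation with a slightly raised threshold (the value is
-arbitrary):
-
bbox=min_val=32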
-
-
-
38.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
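-A sketch of such a mapping, simply leaving c0 without any input so that it
-stays silent:
-
pan="stereo| c1=c1"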
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
38.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98
.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32
.
-
-
-
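-An illustrative invocation that loosens both defaults slightly (values are
-arbitrary):
-
blackframe=amount=95:threshold=28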
-
-
-
38.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
The blend
filter takes two input streams and outputs one
-stream, the first input is the "top" layer and the second input is the
-"bottom" layer. Output terminates when the shortest input terminates.
-
-
The tblend
(time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode . Default value is normal
.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
38.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
38.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2
for the
-luma and alpha planes, and of min(cw,ch)/2
for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
38.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
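-Presumably by zeroing the chroma and alpha radii explicitly:
-
boxblur=2:1:cr=0:ar=0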
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
38.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
38.9.1 Examples# TOC
-
-
- Visualize multi-directional MVs from P- and B-frames using ffplay
:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
38.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
38.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
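-Presumably by raising the red-cyan balance in the shadows only:
-
colorbalance=rs=.3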
-
-
-
-
38.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), and change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
38.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
38.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
red = red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
38.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
38.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
38.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
38.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1 will force the output display aspect ratio
-to be the same of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
38.15.1 Examples# TOC
-
-
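-Two illustrative invocations (sizes and offsets are arbitrary): crop a 100x100
-window with its top-left corner at 12,34, or trim 50 pixels from every edge by
-relying on the default centered position:
-
crop=100:100:12:34
crop=in_w-100:in_h-100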
-
-
-
38.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
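-A plausible probe run (input.mkv is only a placeholder), discarding the output
-and relying on the logged suggestions:
-
ffmpeg -i input.mkv -vf cropdetect=limit=24:round=2 -f null -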
-
-
-
38.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-this interval, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r, g, b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fall back on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
To avoid some filtergraph syntax conflicts, each key points list needs to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ....
-
-
-
38.17.1 Examples# TOC
-
-
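-Two illustrative uses: apply the vintage preset, or lift the middle blues with
-a hand-written key points list (the values are arbitrary):
-
curves=preset=vintage
curves=blue='0/0 0.5/0.58 1/1'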
-
-
-
38.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlap value doesn’t allow processing of the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize-1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
38.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5
:
-
-
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
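-
-For reference, the plain sigma form of the same operation would presumably be:
-
dctdnoiz=sigma=4.5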
-
-
-
Violent denoise using a block size of 16x16
:
-
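-Presumably along these lines, raising n to 4 and using a stronger (arbitrary)
-sigma:
-
dctdnoiz=15:n=4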
-
-
-
38.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared a duplicate. Default
-is 1.1.
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
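-An illustrative invocation dropping one frame out of every five (the default
-cycle, spelled out for clarity):
-
decimate=cycle=5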
-
-
-
38.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
-Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content, then the output of pullup,dejudder
-will have a variable frame rate. This may change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
-
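-A hypothetical pipeline following the pullup,dejudder chain mentioned above
-(file names are placeholders):
-
ffmpeg -i input.ts -vf pullup,dejudder output.mkv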
-
-
38.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
38.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
38.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area where to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, use OpenCL capabilities; only available if
-FFmpeg was configured with --enable-opencl. Default value is 0.
-
-
-
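-An illustrative invocation widening the allowed motion range (values are
-arbitrary):
-
deshake=rx=32:ry=32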
-
-
-
38.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
-The parameters for x, y, w, h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
38.23.1 Examples# TOC
-
-
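-Two illustrative invocations: the bare filter, which draws a black box around
-the edge of the frame, and a semi-transparent red box at an arbitrary position:
-
drawbox
drawbox=x=10:y=20:w=200:h=60:color=red@0.5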
-
-
-
38.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell; if 0 they are interpreted as the
-input width and height, respectively, minus thickness, so the image gets
-framed. They default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
-The parameters for x, y, w, h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
38.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
38.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
38.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of the input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer
-each other, so you can for example specify y=x/dar
.
-
-
-
-
-
38.25.2 Text expansion# TOC
-
-
If expansion is set to strftime,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none, the text is printed verbatim.
-
-
If expansion is set to normal (which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequence of the form %{...}
are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
38.25.3 Examples# TOC
-
-
-
-
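-An illustrative example (the font path is a hypothetical placeholder): draw white text centered in the frame using the text_w and text_h constants described above:
-
drawtext=fontfile=/path/to/font.ttf:text='Hello world':fontcolor=white:fontsize=24:x=(w-text_w)/2:y=(h-text_h)/2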
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
38.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high.
-
-Default value for low is 20/255
, and default value for high
-is 50/255
.
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
38.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
38.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select the r, g, b planes
-together with the y, u, v planes at the same time.
-
-
-
-
-
38.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
38.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
-
-
-
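-An illustrative example (the values are arbitrary): posterize each frame to 16 distinct colors, spending 20 iterations on the mapping:
-
elbg=l=16:n=20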
38.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in
.
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
38.29.1 Examples# TOC
-
-
-
-
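-An illustrative example (the frame counts are arbitrary and assume a clip of at least 200 frames): fade in over the first 30 frames and fade out over the last 30:
-
fade=t=in:s=0:n=30, fade=t=out:s=170:n=30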
-
38.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0 or
-top) or the bottom field (if the value is 1 or
-bottom).
-
-
-
-
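-For illustration: keep only the top field of each frame:
-
field=type=top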
-
38.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch
needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch
will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch
can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a lightweight clone of TFM,
-on which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch
and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parentheses at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0
could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1
.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 defaults to 0
.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0]
.
-
-Default value is 12.0
.
-
-
-combmatch
-When combmatch is not none, fieldmatch will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1 (every pixel will be detected as combed) to 255 (no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12].
-
-Default value is 9
.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0
and maximum is blocky x blockx
(at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80
.
-
-
-
-
-
38.31.1 p/c/n/u/b meaning# TOC
-
-
-
38.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch
is configured to run a matching from bottom
-(field=bottom) this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
38.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, an
-’x’ is placed above and below each matched field.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
38.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
38.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
38.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
38.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
38.34.1 Examples# TOC
-
-
-
-
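-An illustrative example: constrain the input to either yuv420p or yuv422p, letting libavfilter pick whichever suits the next filter:
-
format=pix_fmts=yuv420p|yuv422p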
-
38.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
38.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
-
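fps=fps=25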
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
38.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
38.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select frame after every step
frames.
-Allowed values are positive integers higher than 0. Default value is 1
.
-
-
-
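-For illustration: keep one frame out of every five:
-
framestep=step=5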
-
-
38.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH.
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R/G/B, where R, G, and B are floating point
-numbers between 0.0 and 1.0, inclusive, or as a color description as specified in the "Color"
-section in the ffmpeg-utils manual), a position (specified as X/Y, where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
38.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
38.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0
− PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames have often larger QP. Default is
-0
(not enabled).
-
-
-
-
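-An illustrative example (the values are arbitrary): use the maximum number of averaging levels with stronger smoothing:
-
fspp=quality=5:strength=8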
-
-
38.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified it will evaluate to an opaque value.
-If none of the chrominance expressions are specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
38.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
38.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
38.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and radius of 8:
-
-
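gradfun=3.5:8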
- Specify radius, omitting the strength (which will fall-back to the default
-value):
-
-
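gradfun=radius=8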
-
-
-
-
38.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0
.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0
disable the filter after the last frame of the CLUT is reached.
-Default is 1
.
-
-
-
-
haldclut
also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
38.42.1 Workflow examples# TOC
-
-
-
38.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut
to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (duration of
-clut.nut ), then the latest picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot
stream.
-
-
-
38.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a squared image of Level*Level*Level
by
-Level*Level*Level
pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut
filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
38.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
38.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none
, weak
or
-strong
. It defaults to none
.
-
-
-
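-An illustrative example (the values are arbitrary): apply mild equalization with weak antibanding:
-
histeq=strength=0.1:antibanding=weak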
-
-
38.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar as color
but actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform
mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10
. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform
. Can be either row
, or column
.
-Default is row
.
-
-
-waveform_mirror
-Set mirroring mode for waveform
. 0
means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row
mode and at the top for column
mode. Default is
-0
(unmirrored).
-
-
-display_mode
-Set display mode for waveform
and levels
.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels
. Can be either linear
, or logarithmic
.
-Default is linear
.
-
-
-
-
-
38.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
38.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
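-An illustrative example (the value is arbitrary): denoise more aggressively than the defaults by raising the spatial luma strength and letting the other parameters derive from it as described above:
-
hqdn3d=luma_spatial=8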
-
-
38.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for hq2x, 3 for
-hq3x and 4 for hq4x.
-Default is 3.
-
-
-
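-For illustration: upscale pixel art by a factor of 4 using hq4x:
-
hqx=n=4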
-
-
38.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
38.48.1 Examples# TOC
-
-
-
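-An illustrative example (the values are arbitrary): rotate the hue by 90 degrees and slightly raise the saturation:
-
hue=h=90:s=1.5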
-
-
38.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
38.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect if the input frames are interlaced, progressive,
-top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-whether the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
-
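-An illustrative usage sketch (the input name is a placeholder): run detection over a whole file, discarding the output and reading the statistics from the log:
-
ffmpeg -i input.mkv -vf idet -an -f null -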
-
-
38.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced image fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none
.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
-
-
-
-
-
38.51 interlace# TOC
-
-
Simple interlacing filter from progressive contents. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
-
-
38.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
38.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
-
- Paint processed pixels in white:
-
-
-
-
-
38.53 lenscorrection# TOC
-
-
Correct radial lens distortion.
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
38.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
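-An illustrative example (the coefficients are placeholders; real values must be determined per lens as described above): correct a mild barrel distortion centered on the image:
-
lenscorrection=cx=0.5:cy=0.5:k1=-0.227:k2=-0.022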
-
-
38.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
-
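-A minimal usage sketch (the file name is a hypothetical placeholder):
-
lut3d=file=mylut.cube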
-
-
38.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
38.55.1 Examples# TOC
-
-
-
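-Illustrative examples (not taken from the original list; the chroma value 128 assumes 8-bit input): negate the luma plane, or remove the chroma to obtain a grayscale image:
-
lutyuv=y=negval
lutyuv="u=128:v=128"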
-
-
38.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merge selected input
-planes to the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0
.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mappings is
-similar, ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p.
-
-
-
-
-
38.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
38.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
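Since mcdeint expects one field per frame, a typical chain pairs it with yadif in field mode; a hedged sketch (mode and parity chosen only for illustration):

yadif=1,mcdeint=mode=medium:parity=tff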
-
-
-
38.58 mp# TOC
-
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-of the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
38.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
-
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
38.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previous sequentially dropped frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
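A hedged sketch of a common workflow (file names are placeholders): drop near-duplicate frames and then regenerate presentation timestamps with setpts so the remaining frames play back smoothly:

ffmpeg -i input.mp4 -vf mpdecimate,setpts=N/FRAME_RATE/TB output.mp4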
-
-
-
38.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer as input; if non-zero it also negates the
-alpha component (if available). The default value is 0.
-
-
-
38.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-pix_fmts=yuv420p|monow|rgb24.
-
-
-
-
-
-
38.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
38.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for a specific pixel component or all pixel components in
-case of all_strength . Default value is 0. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags or set flags for all components if all_flags .
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
38.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
38.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
38.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
38.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file are assumed instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
38.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
38.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth.
-
-
-
38.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them begin in the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together more overlays but you should test the
-efficiency of such approach.
-
-
-
38.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
38.65.2 Examples# TOC
-
-
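A representative sketch (file names are placeholders): overlay a logo 10 pixels in from the top-right corner of the main video, using the W and w constants described above:

ffmpeg -i main.mp4 -i logo.png -filter_complex "overlay=W-w-10:10" output.mp4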
-
-
-
38.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
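An illustrative sketch (the strength values are arbitrary and chosen only to show the syntax):

owdenoise=depth=9:luma_strength=3:chroma_strength=5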
-
-
-
38.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
38.67.1 Examples# TOC
-
-
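A representative sketch using the constants listed above: pad the input to 1280x720 and center it on a black background:

pad=w=1280:h=720:x=(ow-iw)/2:y=(oh-ih)/2:color=black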
-
-
-
38.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H, with which the perspective will remain
-unchanged. If the sense option is set to source, then the specified points
-will be sent to the corners of the destination. If the sense option is set
-to destination, then the corners of the source will be sent to the specified
-coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
-
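A hedged illustration (the corner offsets are arbitrary): pull the two top corners 100 pixels inwards to compensate a slight keystone, sending those source points to the destination corners:

perspective=x0=100:y0=0:x1=W-100:y1=0:sense=source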
-
-
38.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
38.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
38.71 pp# TOC
-
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1) filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1) filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1) filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a)
-
-
-
-
-
38.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
-
pp=hb/vb/dr/al
-
-
- Apply default filters without brightness/contrast correction:
-
-
pp=de/-al
-
-
- Apply default filters and temporal denoiser:
-
-
pp=default/tmpnoise|1|2|3
-
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
-
pp=hb|y/vb|a
-
-
-
-
38.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp = 6 with 7 point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
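An illustrative sketch (the QP value is arbitrary):

pp7=qp=4:mode=soft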
-
-
-
38.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes two input videos; the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file written when stats_file is selected contains a sequence of
-key/value pairs of the form key :value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
38.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup: use fps=24000/1001 if the input frame rate is 29.97fps, and
-fps=24 for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use chroma plane instead of the default luma plane
-for doing filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
38.75 qp# TOC
-
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
38.75.1 Examples# TOC
-
-
- Some equation like:
-
-
qp=2+2*sin(PI*qp)
-
-
-
38.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
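A minimal sketch (the bitmap file name is a placeholder; its dimensions must match the video):

removelogo=filename=logo_mask.png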
-
-
38.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
38.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
-
rotate=PI/6
-
-
- Rotate the input by PI/6 radians counter-clockwise:
-
-
rotate=-PI/6
-
-
- Rotate the input by 45 degrees clockwise:
-
-
rotate=45*PI/180
-
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
-
rotate=PI/3+2*PI*t/T
-
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
38.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
38.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
-
38.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
38.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is a different thing than specifying -1 for w
-or h , you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar.
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
38.79.2 Examples# TOC
-
-
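A few representative sketches using the options and constants documented above:

# Scale the input to 1280x720
scale=w=1280:h=720

# Halve the input width, keeping the aspect ratio
scale=iw/2:-1

# Fit within 1280x720 without exceeding it, preserving the aspect ratio
scale=1280:720:force_original_aspect_ratio=decrease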
-
-
-
38.80 separatefields# TOC
-
-
The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half-height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses the field-dominance information in the frame to decide which
-of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before separatefields.
-
-
-
38.81 setdar, setsar# TOC
-
-
The setdar filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar only), sar (setsar only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, it is assumed the value "0".
-In case the form "num :den " is used, the : character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
38.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-setdar=dar=1.77777
-
-
- To change the sample aspect ratio to 10:11, specify:
-
-
setsar=sar=10/11
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
38.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder or yadif).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
38.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType enum and of
-the av_get_picture_type_char function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
-
38.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
38.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specify an additional form of image analysis.
-out output video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outliers pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition . Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
38.85.1 Examples# TOC
-
-
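A representative sketch: log the per-frame statistics and highlight pixels that fall outside of legal broadcast range in red:

signalstats=out=brng:color=red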
-
-
-
38.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
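An illustrative sketch (the parameter values are arbitrary): blur flat areas slightly while leaving edges untouched:

smartblur=luma_radius=1.0:luma_strength=0.6:luma_threshold=30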
-
-
38.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filters accept the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
38.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
-
stereo3d=sbsl:aybd
-
-
- Convert input video from above below (left eye above, right eye below) to side by side crosseye:
-
-
stereo3d=abl:sbsr
-
-
-
-
-
38.88 spp# TOC
-
-
Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or - in the case of quality level 6 - all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0, the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1. Using this
-option may cause flicker since the B-Frames often have larger QP. Default is
-0 (not enabled).
-
-
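An illustrative sketch (quality and QP values are arbitrary):

spp=quality=6:qp=4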
-
-
-
38.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
-
subtitles=sub.srt
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
-
subtitles=video.mkv
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
38.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
38.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
38.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-
-The default value is top.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
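An illustrative sketch: apply the classic 2:3 pulldown to 24 fps material for NTSC output:

telecine=first_field=top:pattern=23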
-
-
38.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100
.
-
-
-
-
Since the filter keeps track of the whole frame sequence, a bigger n
-value will result in a higher memory usage, so a high value is not recommended.
-
-
-
38.93.1 Examples# TOC
-
-
- Extract one picture every 50 frames:
-
thumbnail=50
- Complete example of a thumbnail creation with ffmpeg
:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
38.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w*h. The default value is 0, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
38.94.1 Examples# TOC
-
-
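For example, downscale keyframes and combine them into 8x8 mosaics (assuming an input file.avi; the -skip_frame and -vsync values are the usual choices for this kind of extraction):
-
ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
-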
-
-
-
38.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge
.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
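As an illustrative sketch (file names assumed), interleave pairs of progressive frames into interlaced frames with vertical low-pass filtering enabled:
-
ffmpeg -i progressive.mp4 -vf tinterlace=mode=interleave_top:flags=low_pass_filter interlaced.mp4
-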
-
-
-
-
-
38.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated, the passthrough
option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none
.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
transpose=1:portrait
-
-
38.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
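For instance, a minimal sketch (INPUT and OUTPUT are placeholders) that keeps the second minute of the input and resets the output timestamps to start at zero:
-
ffmpeg -i INPUT -vf "trim=60:120,setpts=PTS-STARTPTS" OUTPUT
-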
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
-
-
-
-
-
38.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number, reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number, reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
38.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
unsharp=7:7:-2:7:7:-2
-
-
-
38.99 uspp# TOC
-
-
Apply an ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or, in the case of quality level 8, all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is 3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
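As a hedged example (file names are placeholders; expect very slow processing), apply the filter at its maximum quality level:
-
ffmpeg -i input.mp4 -vf uspp=quality=8 output.mp4
-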
-
-
38.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
38.100.1 Examples# TOC
-
-
- Use default values:
-
vidstabdetect
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
vidstabdetect=show=1
- Analyze a video with medium shakiness using ffmpeg
:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
38.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
-
38.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smooth the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0
. Default value is 0.
-
-Use also tripod
option of vidstabdetect .
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
38.101.2 Examples# TOC
-
-
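One possible two-pass sketch (file names are illustrative; the unsharp values follow the recommendation mentioned above): first analyze the input, then apply the transforms together with a mild sharpen:
-
ffmpeg -i input.mp4 -vf vidstabdetect=result=transforms.trf -f null -
ffmpeg -i input.mp4 -vf vidstabtransform=input=transforms.trf:smoothing=30,unsharp=5:5:0.8:3:3:0.4 stabilized.mp4
-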
-
-
-
38.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
38.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2]
range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2"
and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1
.
-
-
-
-
-
38.103.1 Expressions# TOC
-
-
The angle, x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
38.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
vignette=PI/4
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
38.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so-called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accepts one of the following values:
-
-
-‘all ’
-Deinterlace all frames.
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
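For example, a minimal sketch (file names assumed) deinterlacing only frames flagged as interlaced, using the more accurate coefficient set:
-
ffmpeg -i interlaced.ts -vf w3fdif=filter=complex:deint=interlaced output.mp4
-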
-
-
-
38.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for 3xBR and 4 for 4xBR.
-Default is 3.
-
-
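For example, upscale a pixel-art image by a factor of 4 (the input and output names are placeholders):
-
ffmpeg -i pixel_art.png -vf xbr=n=4 pixel_art_4x.png
-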
-
-
-
38.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame
, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field
, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame
.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto
.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accepts one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all
.
-
-
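As an illustrative example (file names assumed), output one frame per field, detect the field parity automatically, and only touch frames marked as interlaced:
-
ffmpeg -i input.ts -vf yadif=mode=send_field:parity=auto:deint=interlaced output.mp4
-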
-
-
-
38.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets for how many frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of last output frame of previous input frame or 0 when there was
-no such frame yet (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-Number of output frames created for the previous input frame.
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
38.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
39 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
39.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
39.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename , and pattern options. If such options are
-not specified an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
39.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
39.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
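For example, preview the source with ffplay (the size and iteration count below are arbitrary illustrative choices):
-
ffplay -f lavfi mandelbrot=s=640x480:maxiter=1000
-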
-
-
39.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
mptestsrc=t=dc_luma
will generate a "dc_luma" test pattern.
-
-
-
39.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name to the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
39.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS /BNB ",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bits integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, the low order bits specify
-the rule for "borning" new cells. Higher order bits encode for an
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9
specifies a stay alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 neighbor alive
-cells, and will make a new cell be born if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
39.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
life=rule=S14/B34
- Full example with slow death effect (mold) using ffplay
:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
39.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
The color
 source provides a uniformly colored input.
-
-
The haldclutsrc
source provides an identity Hald CLUT. See also
-haldclut filter.
-
-
The nullsrc
source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
The rgbtestsrc
source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
The smptebars
source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
The smptehdbars
source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
The testsrc
source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as an identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc
can be used. The
-following command generates noise in the luminance plane by employing
-the geq
filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
-
-
39.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
-
-
40 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
40.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
40.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
41 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
41.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
The filter is used to measure the difference between channels of a stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight (or nearly straight) horizontal line appears, this
-indicates that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
-Allowed range is [0, 255]
.
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
-Allowed range is [0, 255]
.
-
-
-zoom
-Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
-
-
-
-
-
41.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
41.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
41.2.1 Examples# TOC
-
-
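For example, concatenate an opening, an episode and an ending, all with one video and one audio stream each (the file names are illustrative):
-
ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
  '[0:0] [0:1] [1:0] [1:1] [2:0] [2:1] concat=n=3:v=1:a=1 [v] [a]' \
  -map '[v]' -map '[a]' output.mkv
-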
-
-
-
41.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M
), Short-term loudness (S
),
-Integrated loudness (I
) and Loudness Range (LRA
).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or no. The video stream will be the first output stream if
-activated. Default is 0
.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9
. Common values are 9
and
-18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value between this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1
, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
.
-
-Default is 0
.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the higher sample value. It logs a message
-for sample-peak (identified by SPK
).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak
-(identified by TPK) and true-peak per frame (identified by FTPK).
-This mode requires a build with libswresample
.
-
-
-
-
-
-
-
-
41.3.1 Examples# TOC
-
-
- Real-time graph using ffplay
, with a EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg
:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
41.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave
works with video inputs, ainterleave
with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-
In order to submit one frame to output, these filters need to enqueue
-at least one frame for each input, so they cannot work if one
-input has not yet terminated and will not receive incoming frames.
-
-
For example, consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to the output until that input sends an end-of-stream signal.
-
-
Also, depending on inputs synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
41.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg
:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
41.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
These filters are mainly aimed at developers to test direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode, must be an integer included between
-0
and UINT32_MAX
. If not specified, or if explicitly set to
--1
, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of an auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected by that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
-
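For example, a minimal sketch (the input name is assumed) forcing all frames read-only to check that the following filter does not write in place:
-
ffmpeg -i input.mp4 -vf "format=yuv420p,perms=mode=ro,vflip" -f null -
-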
-
41.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1
, assuming that the input index starts from 0.
-
-For example a value of 1.2
corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1
, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
41.6.1 Examples# TOC
-
-
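A couple of hedged examples (file names are placeholders): keep only I-frames, or extract frames that are likely scene changes as images:
-
select='eq(pict_type\,I)'
-
ffmpeg -i input.mp4 -vf "select='gt(scene,0.4)'" -vsync vfr scenes_%03d.png
-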
-
-
-
41.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd
must be inserted between two video filters,
-asendcmd
must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
41.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
START[-END] COMMANDS;
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater or equal to START and is lesser than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter]
is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespaces, or
-sequences of characters starting with #
until the end of line,
-are ignored and can be used to annotate comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
41.7.2 Examples# TOC
-
-
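For example, a minimal sketch that sends the tempo command to the following atempo filter when the stream time enters 4 seconds (the target and argument assume an atempo instance placed right after asendcmd):
-
asendcmd=c='4.0 atempo tempo 1.5',atempo
-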
-
-
-
41.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts
works on video frames, asetpts
on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
41.8.1 Examples# TOC
-
-
- Start counting PTS from zero
-
-
- Apply fast motion effect:
-
-
- Apply slow motion effect:
-
-
- Set fixed rate of 25 frames per second:
-
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
-
- Apply an offset of 10 seconds to the input PTS:
-
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
-
- Generate timestamps by counting samples:
-
-
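- For the items above that lack an explicit expression, commonly used forms
-are (a sketch based on typical usage, not verified against this exact FFmpeg
-version):
-
setpts=PTS-STARTPTS    (start counting PTS from zero)
setpts=0.5*PTS         (fast motion)
setpts=2.0*PTS         (slow motion)
setpts=N/(25*TB)       (fixed 25 fps)
setpts=PTS+10/TB       (10 second offset)
asetpts=N/SR/TB        (timestamps from sample count)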
-
-
-
-
41.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
41.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
-
-
- Set the timebase to 1/10:
-
-
- Set the timebase to 1001/1000:
-
-
- Set the timebase to 2*intb:
-
-
- Set the default timebase value:
-
-
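- Commonly used expressions for the cases listed above (a sketch based on
-typical usage): settb=expr=1/25, settb=expr=0.1, settb=1+0.001, settb=2*intb
-and settb=AVTB respectively. A full command line could look like this
-(file names are illustrative):
-
ffmpeg -i input.mkv -vf "settb=expr=1/25" output.mkv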
-
-
-
41.10 showcqt# TOC
-
Convert input audio to a video output representing the
-frequency spectrum logarithmically (using a constant Q transform with the
-Brown-Puckette algorithm), with a musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc).
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies there is a trade-off between
-accuracy in the time domain and accuracy in the frequency domain. A lower timeclamp
-represents events in the time domain more accurately (such as a fast bass drum),
-while a higher value represents events in the frequency domain more accurately
-(such as a bass guitar). The acceptable range is [0.1, 1.0]. Default value is 0.17.
-
-
-coeffclamp
-Specify the transform coeffclamp. A lower coeffclamp makes the transform more
-accurate, a higher one makes it faster. The acceptable range is [0.1, 10.0].
-Default value is 1.0.
-
-
-gamma
-Specify gamma. A lower gamma makes the spectrum more contrasted, a higher gamma
-gives it more dynamic range. The acceptable range is [1.0, 7.0].
-Default value is 3.0.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify the font color expression. This is an arithmetic expression that should
-return an integer value of the form 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD);
-if set to 0, the video size is 960x540. Use this option to reduce CPU usage.
-
-
-fps
-Specify video fps. Default value is 25.
-
-
-count
-Specify the number of transforms per frame, so there are fps*count transforms
-per second. Note that the audio data rate must be divisible by fps*count.
-Default value is 6.
-
-
-
-
-
-
41.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
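- Writing the spectrum to a video file instead of playing it (a sketch;
-file names and encoder choice are illustrative):
-
ffmpeg -i input.wav -filter_complex showcqt -c:v libx264 output.mp4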
-
-
-
41.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512
.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set the saturation modifier for displayed colors. Negative values provide an
-alternative color scheme. 0 means no saturation at all.
-Saturation must be in the [-10.0, 10.0] range.
-Default value is 1.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
41.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
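- A sketch of writing a scrolling per-channel spectrum to a file rather than
-playing it (file names and encoder choice are illustrative):
-
ffmpeg -i input.mp3 -filter_complex "showspectrum=s=1280x480:mode=separate:slide=scroll" -c:v libx264 spectrum.mp4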
-
-
-
41.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
41.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
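- Write both the original audio and its waveform video into one file with
-ffmpeg (a sketch; file names and encoders are illustrative):
-
ffmpeg -i input.mp3 -filter_complex "asplit[a][b];[a]showwaves=s=640x240:mode=line[v]" -map "[v]" -map "[b]" -c:v libx264 -c:a aac -strict experimental output.mp4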
-
-
-
41.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit works with audio input, split with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
41.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
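- A sketch of splitting a video into two differently scaled output files with
-ffmpeg (file names and scales are illustrative):
-
ffmpeg -i input.mp4 -filter_complex "split=2[a][b];[a]scale=1280:-1[big];[b]scale=640:-1[small]" -map "[big]" big.mp4 -map "[small]" small.mp4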
-
-
-
41.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq and azmq work as pass-through filters. zmq
-must be inserted between two video filters, azmq between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq and azmq filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
41.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
42 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
42.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
42.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod, so the numerical value may be suffixed by an IS postfix.
-The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
42.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
-
-
-
-
-
43 See Also# TOC
-
-
ffmpeg
-ffplay , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
44 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-bitstream-filters.html b/Externals/ffmpeg/dev/doc/ffmpeg-bitstream-filters.html
deleted file mode 100644
index b7195b944f..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-bitstream-filters.html
+++ /dev/null
@@ -1,261 +0,0 @@
-
-
-
-
-
-
- FFmpeg Bitstream Filters Documentation
-
-
-
-
-
-
-
-
- FFmpeg Bitstream Filters Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the bitstream filters provided by the
-libavcodec library.
-
-
A bitstream filter operates on the encoded stream data, and performs
-bitstream level modifications without performing decoding.
-
-
-
-
2 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF.
-
-
The option -bsfs of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
2.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
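-A sketch of such a remux (file names are illustrative):
-
ffmpeg -i input.aac -c:a copy -bsf:a aac_adtstoasc output.m4a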
-
-
2.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
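-A sketch of applying it while stream-copying (file names are illustrative):
-
ffmpeg -i INPUT -c copy -bsf chomp OUTPUT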
-
-
2.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified it is assumed ‘k ’.
-
-
For example the following ffmpeg command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264 encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
2.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
2.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
2.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
2.7 mjpega_dump_header# TOC
-
-
-
2.8 movsub# TOC
-
-
-
2.9 mp3_header_decompress# TOC
-
-
-
2.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
-Parameters:
-A numeric string whose value is related to how often output bytes will
-be modified. Values less than or equal to 0 are forbidden; the lower the
-value, the more frequently bytes will be modified, with 1 meaning that
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
2.11 remove_extra# TOC
-
-
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavcodec
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-codecs.html b/Externals/ffmpeg/dev/doc/ffmpeg-codecs.html
deleted file mode 100644
index 968b12f421..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-codecs.html
+++ /dev/null
@@ -1,4474 +0,0 @@
-
-
-
-
-
-
- FFmpeg Codecs Documentation
-
-
-
-
-
-
-
-
- FFmpeg Codecs Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the codecs (decoders and encoders) provided by
-the libavcodec library.
-
-
-
-
2 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
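-For example, a sketch of setting a few of the generic options below from the
-command line (file names and values are illustrative):
-
ffmpeg -i input.mp4 -c:v mpeg4 -b:v 1M -g 250 -bf 2 output.avi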
The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vectors per macroblock (mpeg4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be included between -1 and
-69, default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be included between -1 and
-1024, default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not automatically detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in broken Microsoft decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider the DC coefficient).
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, more strict version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what the consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non-standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk; do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non-compliances as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep the quantizer between qmin and qmax (0 = clip, 1 = use a
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use otherwise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP); lower QP values are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set the limit on motion vector range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non spec compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identically to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make the decoder discard processing depending on the frame type selected
-by the option value (see the example after this options list).
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames excepts keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non drop frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
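-As an example of the frame-skipping options above, a sketch of decoding only
-keyframes while extracting thumbnails (file names are illustrative):
-
ffmpeg -skip_frame nokey -i input.mp4 -vsync vfr keyframe_%03d.png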
-
-
-
-
3 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER /
---disable-decoder=DECODER.
-
-
The option -decoders of the ff* tools will display the list of
-enabled decoders.
-
-
-
-
4 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
4.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
4.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
-
-
-
-
-
5 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
5.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
-
-
-
-
-
5.2 ffwavesynth# TOC
-
-
Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
5.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt.
-
-
-
5.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
5.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc.
-
-
-
5.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
5.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
5.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
5.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
6 Subtitles Decoders# TOC
-
-
-
6.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
6.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0.
-
-
-
-
-
6.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi.
-
-
-
6.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text. You should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext-based subtitles if
-your application can handle simple text-based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext-based subtitles where empty spaces may
-be present at the start or at the end of the lines, or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
-
-
-
7 Encoders# TOC
-
-
Encoders are configured elements in FFmpeg which allow the encoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native encoders
-are enabled by default. Encoders requiring an external library must be enabled
-manually via the corresponding --enable-lib option. You can list all
-available encoders using the configure option --list-encoders.
-
-
You can disable all the encoders with the configure option
---disable-encoders and selectively enable / disable single encoders
-with the options --enable-encoder=ENCODER /
---disable-encoder=ENCODER.
-
-
The option -encoders of the ff* tools will display the list of
-enabled encoders.
-
-
-
-
8 Audio Encoders# TOC
-
-
A description of some of the currently available audio encoders
-follows.
-
-
-
-
-
Advanced Audio Coding (AAC) encoder.
-
-
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
-low complexity (AAC-LC) profile is supported. To use this encoder, you must set
-the strict option to ‘experimental ’ or lower.
-
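-For instance, a sketch of encoding with it from the command line (file names
-and bitrate are illustrative):
-
ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k output.m4a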
-
As this encoder is experimental, unexpected behavior may exist from time to
-time. For a more stable AAC encoder, see libvo-aacenc . However, be warned
-that some users report it has worse quality.
-
-
See also libfdk_aac and libfaac .
-
-
-
8.1.1 Options# TOC
-
-
-b
-Set bit rate in bits/s. Setting this automatically activates constant bit rate
-(CBR) mode.
-
-
-q
-Set quality for variable bit rate (VBR) mode. This option is valid only using
-the ffmpeg
command-line tool. For library interface users, use
-global_quality .
-
-
-stereo_mode
-Set stereo encoding mode. Possible values:
-
-
-‘auto ’
-Automatically selected by the encoder.
-
-
-‘ms_off ’
-Disable middle/side encoding. This is the default.
-
-
-‘ms_force ’
-Force middle/side encoding.
-
-
-
-
-aac_coder
-Set AAC encoder coding method. Possible values:
-
-
-‘faac ’
-FAAC-inspired method.
-
-This method is a simplified reimplementation of the method used in FAAC, which
-sets thresholds proportional to the band energies, and then decreases all the
-thresholds with quantizer steps to find the appropriate quantization with
-distortion below threshold band by band.
-
-The quality of this method is comparable to the two loop searching method
-described below, but slightly better and slower.
-
-
-‘anmr ’
-Average noise to mask ratio (ANMR) trellis-based solution.
-
-In theory this gives the best quality of all the coding methods, but at the
-cost of being the slowest.
-
-
-‘twoloop ’
-Two loop searching (TLS) method.
-
-This method first sets quantizers depending on band thresholds and then tries
-to find an optimal combination by adding or subtracting a specific value from
-all quantizers and adjusting some individual quantizer a little.
-
-This method produces quality similar to the ‘faac ’ method and is the default.
-
-
-‘fast ’
-Constant quantizer method.
-
-This method sets a constant quantizer for all bands. This is the fastest of all
-the methods, yet produces the worst quality.
-
-
-
-
-
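-
-For example, the following command should select this encoder at a constant
-128 kb/s, with the strict setting relaxed as required above (file names are
-placeholders):
-
-
ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k output.m4a
-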
-
-
-
-
8.2 ac3 and ac3_fixed# TOC
-
-
AC-3 audio encoders.
-
-
These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
The ac3 encoder uses floating-point math, while the ac3_fixed
-encoder only uses fixed-point integer math. This does not mean that one is
-always faster, just that one or the other may be better suited to a
-particular system. The floating-point encoder will generally produce better
-quality audio for a given bitrate. The ac3_fixed encoder is not the
-default codec for any of the output formats, so it must be specified explicitly
-using the option -acodec ac3_fixed
in order to use it.
-
-
-
8.2.1 AC-3 Metadata# TOC
-
-
The AC-3 metadata options are used to set parameters that describe the audio,
-but in most cases do not affect the audio encoding itself. Some of the options
-do directly affect or influence the decoding and playback of the resulting
-bitstream, while others are just for informational purposes. A few of the
-options will add bits to the output stream that could otherwise be used for
-audio data, and will thus affect the quality of the output. Those will be
-indicated accordingly with a note in the option list below.
-
-
These parameters are described in detail in several publicly-available
-documents.
-
-
-
-
8.2.1.1 Metadata Control Options# TOC
-
-
--per_frame_metadata boolean
-Allow Per-Frame Metadata. Specifies if the encoder should check for changing
-metadata for each frame.
-
-0
-The metadata values set at initialization will be used for every frame in the
-stream. (default)
-
-1
-Metadata values can be changed before encoding each frame.
-
-
-
-
-
-
-
-
8.2.1.2 Downmix Levels# TOC
-
-
--center_mixlev level
-Center Mix Level. The amount of gain the decoder should apply to the center
-channel when downmixing to stereo. This field will only be written to the
-bitstream if a center channel is present. The value is specified as a scale
-factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6dB gain
-
-
-
-
--surround_mixlev level
-Surround Mix Level. The amount of gain the decoder should apply to the surround
-channel(s) when downmixing to stereo. This field will only be written to the
-bitstream if one or more surround channels are present. The value is specified
-as a scale factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.500
-Apply -6dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
8.2.1.3 Audio Production Information# TOC
-
Audio Production Information is optional information describing the mixing
-environment. Either none or both of the fields are written to the bitstream.
-
-
--mixing_level number
-Mixing Level. Specifies peak sound pressure level (SPL) in the production
-environment when the mix was mastered. Valid values are 80 to 111, or -1 for
-unknown or not indicated. The default value is -1, but that value cannot be
-used if the Audio Production Information is written to the bitstream. Therefore,
-if the room_type
option is not the default value, the mixing_level
-option must not be -1.
-
-
--room_type type
-Room Type. Describes the equalization used during the final mixing session at
-the studio or on the dubbing stage. A large room is a dubbing stage with the
-industry standard X-curve equalization; a small room has flat equalization.
-This field will not be written to the bitstream if both the mixing_level
-option and the room_type
option have the default values.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-large
-Large Room
-
-2
-small
-Small Room
-
-
-
-
-
-
-
-
8.2.1.4 Other Metadata Options# TOC
-
-
--copyright boolean
-Copyright Indicator. Specifies whether a copyright exists for this audio.
-
-0
-off
-No Copyright Exists (default)
-
-1
-on
-Copyright Exists
-
-
-
-
--dialnorm value
-Dialogue Normalization. Indicates how far the average dialogue level of the
-program is below digital 100% full scale (0 dBFS). This parameter determines a
-level shift during audio reproduction that sets the average volume of the
-dialogue to a preset level. The goal is to match volume level between program
-sources. A value of -31dB will result in no volume level change, relative to
-the source volume, during audio reproduction. Valid values are whole numbers in
-the range -31 to -1, with -31 being the default.
-
-
--dsur_mode mode
-Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround
-(Pro Logic). This field will only be written to the bitstream if the audio
-stream is stereo. Using this option does NOT mean the encoder will actually
-apply Dolby Surround processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Not Dolby Surround Encoded
-
-2
-on
-Dolby Surround Encoded
-
-
-
-
--original boolean
-Original Bit Stream Indicator. Specifies whether this audio is from the
-original source and not a copy.
-
-0
-off
-Not Original Source
-
-1
-on
-Original Source (default)
-
-
-
-
-
-
-
-
8.2.2 Extended Bitstream Information# TOC
-
The extended bitstream options are part of the Alternate Bit Stream Syntax as
-specified in Annex D of the A/52:2010 standard. They are grouped into 2 parts.
-If any one parameter in a group is specified, all values in that group will be
-written to the bitstream. Default values are used for those that are written
-but have not been specified. If the mixing levels are written, the decoder
-will use these values instead of the ones specified in the center_mixlev
-and surround_mixlev
options if it supports the Alternate Bit Stream
-Syntax.
-
-
-
8.2.2.1 Extended Bitstream Information - Part 1# TOC
-
-
--dmix_mode mode
-Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt
-(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-ltrt
-Lt/Rt Downmix Preferred
-
-2
-loro
-Lo/Ro Downmix Preferred
-
-
-
-
--ltrt_cmixlev level
-Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lt/Rt mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--ltrt_surmixlev level
-Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lt/Rt mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
--loro_cmixlev level
-Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lo/Ro mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--loro_surmixlev level
-Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lo/Ro mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
8.2.2.2 Extended Bitstream Information - Part 2# TOC
-
-
--dsurex_mode mode
-Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX
-(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually
-apply Dolby Surround EX processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Dolby Surround EX Off
-
-2
-on
-Dolby Surround EX On
-
-
-
-
--dheadphone_mode mode
-Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone
-encoding (multi-channel matrixed to 2.0 for use with headphones). Using this
-option does NOT mean the encoder will actually apply Dolby Headphone
-processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Dolby Headphone Off
-
-2
-on
-Dolby Headphone On
-
-
-
-
--ad_conv_type type
-A/D Converter Type. Indicates whether the audio has passed through HDCD A/D
-conversion.
-
-0
-standard
-Standard A/D Converter (default)
-
-1
-hdcd
-HDCD A/D Converter
-
-
-
-
-
-
-
-
8.2.3 Other AC-3 Encoding Options# TOC
-
-
--stereo_rematrixing boolean
-Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
-is an optional AC-3 feature that increases quality by selectively encoding
-the left/right channels as mid/side. This option is enabled by default, and it
-is highly recommended that it be left as enabled except for testing purposes.
-
-
-
-
-
-
8.2.4 Floating-Point-Only AC-3 Encoding Options# TOC
-
-
These options are only valid for the floating-point encoder and do not exist
-for the fixed-point encoder due to the corresponding features not being
-implemented in fixed-point.
-
-
--channel_coupling boolean
-Enables/Disables use of channel coupling, which is an optional AC-3 feature
-that increases quality by combining high frequency information from multiple
-channels into a single channel. The per-channel high frequency information is
-sent with less accuracy in both the frequency and time domains. This allows
-more bits to be used for lower frequencies while preserving enough information
-to reconstruct the high frequencies. This option is enabled by default for the
-floating-point encoder and should generally be left as enabled except for
-testing purposes or to increase encoding speed.
-
--1
-auto
-Selected by Encoder (default)
-
-0
-off
-Disable Channel Coupling
-
-1
-on
-Enable Channel Coupling
-
-
-
-
--cpl_start_band number
-Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a
-value higher than the bandwidth is used, it will be reduced to 1 less than the
-coupling end band. If auto is used, the start band will be determined by
-the encoder based on the bit rate, sample rate, and channel layout. This option
-has no effect if channel coupling is disabled.
-
--1
-auto
-Selected by Encoder (default)
-
-
-
-
-
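-
-As an illustration, a command along these lines should encode AC-3 at
-448 kb/s while also writing some of the metadata fields described above
-(file names and values are placeholders):
-
-
ffmpeg -i input.wav -c:a ac3 -b:a 448k -center_mixlev 0.595 -dialnorm -27 output.ac3
-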
-
-
-
8.3 libfaac# TOC
-
-
libfaac AAC (Advanced Audio Coding) encoder wrapper.
-
-
Requires the presence of the libfaac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfaac --enable-nonfree
.
-
-
-This encoder is considered to be of higher quality with respect to the
-native experimental FFmpeg AAC encoder .
-
-
For more information see the libfaac project at
-http://www.audiocoding.com/faac.html/ .
-
-
-
8.3.1 Options# TOC
-
-
The following shared FFmpeg codec options are recognized.
-
-
-The following options are supported by the libfaac wrapper. The
-faac-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
-is not explicitly specified, it is automatically set to a suitable
-value depending on the selected profile. faac bitrate is
-expressed in kilobits/s.
-
-Note that libfaac does not support CBR (Constant Bit Rate) but only
-ABR (Average Bit Rate).
-
-If VBR mode is enabled this option is ignored.
-
-
-ar (-R )
-Set audio sampling rate (in Hz).
-
-
-ac (-c )
-Set the number of audio channels.
-
-
-cutoff (-C )
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_main ’
-Main AAC (Main)
-
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_ssr ’
-Scalable Sample Rate (SSR)
-
-
-‘aac_ltp ’
-Long Term Prediction (LTP)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-flags +qscale
-Set constant quality VBR (Variable Bit Rate) mode.
-
-
-global_quality
-Set quality in VBR mode as an integer number of lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-and used to set the quality value used by libfaac. A reasonable range
-for the option value in QP units is [10-500], the higher the value the
-higher the quality.
-
-
-q (-q )
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value sets the quality value used by libfaac. A reasonable range
-for the option value is [10-500], the higher the value the higher the
-quality.
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
8.3.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
-
-
- Use ffmpeg
to convert an audio file to VBR AAC, using the
-LTP AAC profile:
-
-
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
-
-
-
-
-
8.4 libfdk_aac# TOC
-
-
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
-
-
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
-the Android project.
-
-
Requires the presence of the libfdk-aac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfdk-aac. The library is also incompatible with the GPL,
-so if you allow the use of GPL, you should configure with
---enable-gpl --enable-nonfree --enable-libfdk-aac.
-
-
This encoder is considered to be of higher quality with respect to
-both the native experimental FFmpeg AAC encoder and
-libfaac .
-
-
VBR encoding, enabled through the vbr or flags
-+qscale options, is experimental and only works with some
-combinations of parameters.
-
-
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
-higher.
-
-
For more information see the fdk-aac project at
-http://sourceforge.net/p/opencore-amr/fdk-aac/ .
-
-
-
8.4.1 Options# TOC
-
-
The following options are mapped on the shared FFmpeg codec options.
-
-
-b
-Set bit rate in bits/s. If the bitrate is not explicitly specified, it
-is automatically set to a suitable value depending on the selected
-profile.
-
-In case VBR mode is enabled the option is ignored.
-
-
-ar
-Set audio sampling rate (in Hz).
-
-
-channels
-Set the number of audio channels.
-
-
-flags +qscale
-Enable fixed quality, VBR (Variable Bit Rate) mode.
-Note that VBR is implicitly enabled when the vbr value is
-positive.
-
-
-cutoff
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_he ’
-High Efficiency AAC (HE-AAC)
-
-
-‘aac_he_v2 ’
-High Efficiency AAC version 2 (HE-AACv2)
-
-
-‘aac_ld ’
-Low Delay AAC (LD)
-
-
-‘aac_eld ’
-Enhanced Low Delay AAC (ELD)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-
-
The following are private options of the libfdk_aac encoder.
-
-
-afterburner
-Enable afterburner feature if set to 1, disabled if set to 0. This
-improves the quality but also the required processing power.
-
-Default value is 1.
-
-
-eld_sbr
-Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
-if set to 0.
-
-Default value is 0.
-
-
-signaling
-Set SBR/PS signaling style.
-
-It can assume one of the following values:
-
-‘default ’
-choose signaling implicitly (explicit hierarchical by default,
-implicit if global header is disabled)
-
-
-‘implicit ’
-implicit backwards compatible signaling
-
-
-‘explicit_sbr ’
-explicit SBR, implicit PS signaling
-
-
-‘explicit_hierarchical ’
-explicit hierarchical signaling
-
-
-
-Default value is ‘default ’.
-
-
-latm
-Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
-
-Default value is 0.
-
-
-header_period
-Set StreamMuxConfig and PCE repetition period (in frames) for sending
-in-band configuration buffers within LATM/LOAS transport layer.
-
-Must be a 16-bits non-negative integer.
-
-Default value is 0.
-
-
-vbr
-Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
-good) and 5 is highest quality. A value of 0 will disable VBR, and CBR
-(Constant Bit Rate) is enabled.
-
-Currently only the ‘aac_low ’ profile supports VBR encoding.
-
-VBR modes 1-5 correspond to roughly the following average bit rates:
-
-
-‘1 ’
-32 kbps/channel
-
-‘2 ’
-40 kbps/channel
-
-‘3 ’
-48-56 kbps/channel
-
-‘4 ’
-64 kbps/channel
-
-‘5 ’
-about 80-96 kbps/channel
-
-
-
-Default value is 0.
-
-
-
-
-
8.4.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to VBR AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
-
-
- Use ffmpeg
 to convert an audio file to CBR 64 kbps AAC, using the
-High-Efficiency AAC profile:
-
-
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
-
-
-
-
-
8.5 libmp3lame# TOC
-
-
LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper.
-
-
Requires the presence of the libmp3lame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libmp3lame
.
-
-
See libshine for a fixed-point MP3 encoder, although with a
-lower quality.
-
-
-
8.5.1 Options# TOC
-
-
-The following options are supported by the libmp3lame wrapper. The
-lame-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate is
-expressed in kilobits/s.
-
-
-q (-V )
-Set constant quality setting for VBR. This option is valid only
-using the ffmpeg
command-line tool. For library interface
-users, use global_quality .
-
-
-compression_level (-q )
-Set algorithm quality. Valid arguments are integers in the 0-9 range,
-with 0 meaning highest quality but slowest, and 9 meaning fastest
-while producing the worst quality.
-
-
-reservoir
-Enable use of bit reservoir when set to 1. Default value is 1. LAME
-has this enabled by default, but it can be overridden by using the
---nores option.
-
-
-joint_stereo (-m j )
-Enable the encoder to use (on a frame by frame basis) either L/R
-stereo or mid/side stereo. Default value is 1.
-
-
-abr (--abr )
-Enable the encoder to use ABR when set to 1. The lame
---abr option sets the target bitrate, while this option only
-tells FFmpeg to use ABR; it still relies on b to set the bitrate.
-
-
-
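-
-For example, the following command should produce a VBR MP3 at LAME quality
-level 2 (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
-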
-
-
-
8.6 libopencore-amrnb# TOC
-
-
OpenCORE Adaptive Multi-Rate Narrowband encoder.
-
-
Requires the presence of the libopencore-amrnb headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopencore-amrnb --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
-but you can override it by setting strict to ‘unofficial ’ or
-lower.
-
-
-
8.6.1 Options# TOC
-
-
-b
-Set bitrate in bits per second. Only the following bitrates are supported,
-otherwise libavcodec will round to the nearest valid bitrate.
-
-
-4750
-5150
-5900
-6700
-7400
-7950
-10200
-12200
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
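-
-A command roughly like the following should encode mono 8000Hz audio at one
-of the supported bitrates; the encoder name libopencore_amrnb is assumed
-here, and the file names are placeholders:
-
-
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 7400 output.amr
-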
-
-
-
8.7 libshine# TOC
-
-
Shine Fixed-Point MP3 encoder wrapper.
-
-
Shine is a fixed-point MP3 encoder. It has a far better performance on
-platforms without an FPU, e.g. armel CPUs, and some phones and tablets.
-However, as it is more targeted on performance than quality, it is not on par
-with LAME and other production-grade encoders quality-wise. Also, according to
-the project’s homepage, this encoder may not be free of bugs as the code was
-written a long time ago and the project was dead for at least 5 years.
-
-
This encoder only supports stereo and mono input. This is also CBR-only.
-
-
The original project (last updated in early 2007) is at
-http://sourceforge.net/projects/libshine-fxp/ . We only support the
-updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine .
-
-
Requires the presence of the libshine headers and library during
-configuration. You need to explicitly configure the build with
---enable-libshine
.
-
-
See also libmp3lame .
-
-
-
8.7.1 Options# TOC
-
-
The following options are supported by the libshine wrapper. The
-shineenc
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. shineenc
-b option
-is expressed in kilobits/s.
-
-
-
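-
-For example, the following command should produce a 128 kbps CBR MP3 with
-this wrapper (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libshine -b:a 128k output.mp3
-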
-
-
-
8.8 libtwolame# TOC
-
-
TwoLAME MP2 encoder wrapper.
-
-
Requires the presence of the libtwolame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtwolame
.
-
-
-
8.8.1 Options# TOC
-
-
-The following options are supported by the libtwolame wrapper. The
-twolame-equivalent options follow the FFmpeg ones and are in
-parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. twolame
b
-option is expressed in kilobits/s. Default value is 128k.
-
-
-q (-V )
-Set quality for experimental VBR support. Maximum value range is
-from -50 to 50, useful range is from -10 to 10. The higher the
-value, the better the quality. This option is valid only using the
-ffmpeg
command-line tool. For library interface users,
-use global_quality .
-
-
-mode (--mode )
-Set the mode of the resulting audio. Possible values:
-
-
-‘auto ’
-Choose mode automatically based on the input. This is the default.
-
-‘stereo ’
-Stereo
-
-‘joint_stereo ’
-Joint stereo
-
-‘dual_channel ’
-Dual channel
-
-‘mono ’
-Mono
-
-
-
-
-psymodel (--psyc-mode )
-Set psychoacoustic model to use in encoding. The argument must be
-an integer between -1 and 4, inclusive. The higher the value, the
-better the quality. The default value is 3.
-
-
-energy_levels (--energy )
-Enable energy levels extensions when set to 1. The default value is
-0 (disabled).
-
-
-error_protection (--protect )
-Enable CRC error protection when set to 1. The default value is 0
-(disabled).
-
-
-copyright (--copyright )
-Set MPEG audio copyright flag when set to 1. The default value is 0
-(disabled).
-
-
-original (--original )
-Set MPEG audio original flag when set to 1. The default value is 0
-(disabled).
-
-
-
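-
-As an illustration, the following command should produce a 192 kbps MP2 file
-with the default automatic mode selection (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libtwolame -b:a 192k output.mp2
-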
-
-
-
8.9 libvo-aacenc# TOC
-
-
VisualOn AAC encoder.
-
-
Requires the presence of the libvo-aacenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-aacenc --enable-version3
.
-
-
This encoder is considered to be worse than the
-native experimental FFmpeg AAC encoder , according to
-multiple sources.
-
-
-
8.9.1 Options# TOC
-
-
-The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
-channels. It is also CBR-only.
-
-
-b
-Set bit rate in bits/s.
-
-
-
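-
-A command roughly like the following should encode stereo AAC-LC at
-128 kb/s; the encoder name libvo_aacenc is assumed here, and the file names
-are placeholders:
-
-
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a
-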
-
-
-
8.10 libvo-amrwbenc# TOC
-
-
VisualOn Adaptive Multi-Rate Wideband encoder.
-
-
Requires the presence of the libvo-amrwbenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-amrwbenc --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 16000Hz sample
-rate, but you can override it by setting strict to
-‘unofficial ’ or lower.
-
-
-
8.10.1 Options# TOC
-
-
-b
-Set bitrate in bits/s. Only the following bitrates are supported, otherwise
-libavcodec will round to the nearest valid bitrate.
-
-
-‘6600 ’
-‘8850 ’
-‘12650 ’
-‘14250 ’
-‘15850 ’
-‘18250 ’
-‘19850 ’
-‘23050 ’
-‘23850 ’
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
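-
-For example, the following command should encode mono 16000Hz audio at one
-of the supported bitrates; the encoder name libvo_amrwbenc is assumed here,
-and the file names are placeholders:
-
-
ffmpeg -i input.wav -ar 16000 -ac 1 -c:a libvo_amrwbenc -b:a 23850 output.amr
-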
-
-
-
8.11 libopus# TOC
-
-
libopus Opus Interactive Audio Codec encoder wrapper.
-
-
Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
-
8.11.1 Option Mapping# TOC
-
-
Most libopus options are modelled after the opusenc
utility from
-opus-tools. The following is an option mapping chart describing options
-supported by the libopus wrapper, and their opusenc-equivalent
-in parentheses.
-
-
-b (bitrate )
-Set the bit rate in bits/s. FFmpeg’s b option is
-expressed in bits/s, while opusenc
’s bitrate in
-kilobits/s.
-
-
-vbr (vbr , hard-cbr , and cvbr )
-Set VBR mode. The FFmpeg vbr option has the following
-valid arguments, with their opusenc
equivalent options
-in parentheses:
-
-
-‘off (hard-cbr ) ’
-Use constant bit rate encoding.
-
-
-‘on (vbr ) ’
-Use variable bit rate encoding (the default).
-
-
-‘constrained (cvbr ) ’
-Use constrained variable bit rate encoding.
-
-
-
-
-compression_level (comp )
-Set encoding algorithm complexity. Valid options are integers in
-the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
-gives the highest quality but slowest encoding. The default is 10.
-
-
-frame_duration (framesize )
-Set maximum frame size, or duration of a frame in milliseconds. The
-argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller
-frame sizes achieve lower latency but less quality at a given bitrate.
-Sizes greater than 20ms are only interesting at fairly low bitrates.
-The default is 20ms.
-
-
-packet_loss (expect-loss )
-Set expected packet loss percentage. The default is 0.
-
-
-application (N.A.)
-Set intended application type. Valid options are listed below:
-
-
-‘voip ’
-Favor improved speech intelligibility.
-
-‘audio ’
-Favor faithfulness to the input (the default).
-
-‘lowdelay ’
-Restrict to only the lowest delay modes.
-
-
-
-
-cutoff (N.A.)
-Set cutoff bandwidth in Hz. The argument must be exactly one of the
-following: 4000, 6000, 8000, 12000, or 20000, corresponding to
-narrowband, mediumband, wideband, super wideband, and fullband
-respectively. The default is 0 (cutoff disabled).
-
-
-
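-
-As an illustration, the following command should encode Opus at 96 kb/s into
-an Ogg container with VBR explicitly enabled (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libopus -b:a 96k -vbr on output.ogg
-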
-
-
-
8.12 libvorbis# TOC
-
-
libvorbis encoder wrapper.
-
-
Requires the presence of the libvorbisenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvorbis
.
-
-
-
8.12.1 Options# TOC
-
-
-The following options are supported by the libvorbis wrapper. The
-oggenc-equivalent of the options are listed in parentheses.
-
-
To get a more accurate and extensive documentation of the libvorbis
-options, consult the libvorbisenc’s and oggenc
’s documentations.
-See http://xiph.org/vorbis/ ,
-http://wiki.xiph.org/Vorbis-tools , and oggenc(1).
-
-
-b (-b )
-Set bitrate expressed in bits/s for ABR. oggenc
-b is
-expressed in kilobits/s.
-
-
-q (-q )
-Set constant quality setting for VBR. The value should be a float
-number in the range of -1.0 to 10.0. The higher the value, the better
-the quality. The default value is ‘3.0 ’.
-
-This option is valid only using the ffmpeg
command-line tool.
-For library interface users, use global_quality .
-
-
-cutoff (--advanced-encode-option lowpass_frequency=N )
-Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc
’s
-related option is expressed in kHz. The default value is ‘0 ’ (cutoff
-disabled).
-
-
-minrate (-m )
-Set minimum bitrate expressed in bits/s. oggenc
-m is
-expressed in kilobits/s.
-
-
-maxrate (-M )
-Set maximum bitrate expressed in bits/s. oggenc
-M is
-expressed in kilobits/s. This only has effect on ABR mode.
-
-
-iblock (--advanced-encode-option impulse_noisetune=N )
-Set noise floor bias for impulse blocks. The value is a float number from
--15.0 to 0.0. A negative bias instructs the encoder to pay special attention
-to the crispness of transients in the encoded audio. The tradeoff for better
-transient response is a higher bitrate.
-
-
-
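-
-For example, the following command should encode Vorbis at constant
-quality 5 (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libvorbis -q:a 5 output.ogg
-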
-
-
-
8.13 libwavpack# TOC
-
-
A wrapper providing WavPack encoding through libwavpack.
-
-
Only lossless mode using 32-bit integer samples is supported currently.
-
-
Requires the presence of the libwavpack headers and library during
-configuration. You need to explicitly configure the build with
---enable-libwavpack
.
-
-
-Note that a libavcodec-native encoder for the WavPack codec exists so users can
-encode audio with this codec without using this encoder. See wavpackenc .
-
-
-
8.13.1 Options# TOC
-
-
wavpack
command line utility’s corresponding options are listed in
-parentheses, if any.
-
-
-frame_size (--blocksize )
-Default is 32768.
-
-
-compression_level
-Set speed vs. compression tradeoff. Acceptable arguments are listed below:
-
-
-‘0 (-f ) ’
-Fast mode.
-
-
-‘1 ’
-Normal (default) settings.
-
-
-‘2 (-h ) ’
-High quality.
-
-
-‘3 (-hh ) ’
-Very high quality.
-
-
-‘4-8 (-hh -x EXTRAPROC ) ’
-Same as ‘3 ’, but with extra processing enabled.
-
-‘4 ’ is the same as -x2 and ‘8 ’ is the same as -x6 .
-
-
-
-
-
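-
-A command roughly like the following should produce a lossless WavPack file
-at the high quality compression setting (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libwavpack -compression_level 2 output.wv
-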
-
-
-
8.14 wavpack# TOC
-
-
WavPack lossless audio encoder.
-
-
This is a libavcodec-native WavPack encoder. There is also an encoder based on
-libwavpack, but there is virtually no reason to use that encoder.
-
-
See also libwavpack .
-
-
-
8.14.1 Options# TOC
-
-
The equivalent options for wavpack
command line utility are listed in
-parentheses.
-
-
-
8.14.1.1 Shared options# TOC
-
-
The following shared options are effective for this encoder. Only special notes
-about this particular encoder will be documented here. For the general meaning
-of the options, see the Codec Options chapter .
-
-
-frame_size (--blocksize )
-For this encoder, the range for this option is between 128 and 131072. Default
-is automatically decided based on sample rate and number of channels.
-
-For the complete formula used to calculate the default, see
-libavcodec/wavpackenc.c .
-
-
-compression_level (-f , -h , -hh , and -x )
-This option’s syntax is consistent with libwavpack ’s.
-
-
-
-
-
8.14.1.2 Private options# TOC
-
-
-joint_stereo (-j )
-Set whether to enable joint stereo. Valid values are:
-
-
-‘on (1 ) ’
-Force mid/side audio encoding.
-
-‘off (0 ) ’
-Force left/right audio encoding.
-
-‘auto ’
-Let the encoder decide automatically.
-
-
-
-
-optimize_mono
-Set whether to enable optimization for mono. This option is only effective for
-non-mono streams. Available values:
-
-
-‘on ’
-enabled
-
-‘off ’
-disabled
-
-
-
-
-
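-
-For example, the following command should use the native encoder and force
-mid/side coding (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a wavpack -joint_stereo on output.wv
-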
-
-
-
-
9 Video Encoders# TOC
-
-
A description of some of the currently available video encoders
-follows.
-
-
-
9.1 libtheora# TOC
-
-
libtheora Theora encoder wrapper.
-
-
Requires the presence of the libtheora headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtheora
.
-
-
For more information about the libtheora project see
-http://www.theora.org/ .
-
-
-
9.1.1 Options# TOC
-
-
The following global options are mapped to internal libtheora options
-which affect the quality and the bitrate of the encoded stream.
-
-
-b
-Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
-case VBR (Variable Bit Rate) mode is enabled this option is ignored.
-
-
-flags
-Used to enable constant quality mode (VBR) encoding through the
-qscale flag, and to enable the pass1
and pass2
-modes.
-
-
-g
-Set the GOP size.
-
-
-global_quality
-Set the global quality as an integer in lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
-value in the native libtheora range [0-63]. A higher value corresponds
-to a higher quality.
-
-
-q
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value is clipped in the [0-10] range, and then multiplied by 6.3
-to get a value in the native libtheora range [0-63].
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
9.1.2 Examples# TOC
-
-
- Set maximum constant quality (VBR) encoding with ffmpeg
:
-
-
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
-
-
- Use ffmpeg
to convert a CBR 1000 kbps Theora video stream:
-
-
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
-
-
-
-
-
9.2 libvpx# TOC
-
-
VP8/VP9 format supported through libvpx.
-
-
Requires the presence of the libvpx headers and library during configuration.
-You need to explicitly configure the build with --enable-libvpx
.
-
-
-
9.2.1 Options# TOC
-
-
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
-
-
-threads
-g_threads
-
-
-profile
-g_profile
-
-
-vb
-rc_target_bitrate
-
-
-g
-kf_max_dist
-
-
-keyint_min
-kf_min_dist
-
-
-qmin
-rc_min_quantizer
-
-
-qmax
-rc_max_quantizer
-
-
-bufsize, vb
-rc_buf_sz
-(bufsize * 1000 / vb)
-
-rc_buf_optimal_sz
-(bufsize * 1000 / vb * 5 / 6)
-
-
-rc_init_occupancy, vb
-rc_buf_initial_sz
-(rc_init_occupancy * 1000 / vb)
-
-
-rc_buffer_aggressivity
-rc_undershoot_pct
-
-
-skip_threshold
-rc_dropframe_thresh
-
-
-qcomp
-rc_2pass_vbr_bias_pct
-
-
-maxrate, vb
-rc_2pass_vbr_maxsection_pct
-(maxrate * 100 / vb)
-
-
-minrate, vb
-rc_2pass_vbr_minsection_pct
-(minrate * 100 / vb)
-
-
-minrate, maxrate, vb
-VPX_CBR
-(minrate == maxrate == vb)
-
-
-crf
-VPX_CQ
, VP8E_SET_CQ_LEVEL
-
-
-quality
-
-best
-VPX_DL_BEST_QUALITY
-
-good
-VPX_DL_GOOD_QUALITY
-
-realtime
-VPX_DL_REALTIME
-
-
-
-
-speed
-VP8E_SET_CPUUSED
-
-
-nr
-VP8E_SET_NOISE_SENSITIVITY
-
-
-mb_threshold
-VP8E_SET_STATIC_THRESHOLD
-
-
-slices
-VP8E_SET_TOKEN_PARTITIONS
-
-
-max-intra-rate
-VP8E_SET_MAX_INTRA_BITRATE_PCT
-
-
-force_key_frames
-VPX_EFLAG_FORCE_KF
-
-
-Alternate reference frame related
-
-vp8flags altref
-VP8E_SET_ENABLEAUTOALTREF
-
-arnr_max_frames
-VP8E_SET_ARNR_MAXFRAMES
-
-arnr_type
-VP8E_SET_ARNR_TYPE
-
-arnr_strength
-VP8E_SET_ARNR_STRENGTH
-
-rc_lookahead
-g_lag_in_frames
-
-
-
-
-vp8flags error_resilient
-g_error_resilient
-
-
-aq_mode
-VP9E_SET_AQ_MODE
-
-
-
-
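-
-As an illustration, the following command should produce a constrained
-quality VP8 encode in WebM, combining a target bitrate with the crf option
-mapped above (file names are placeholders):
-
-
ffmpeg -i input.mp4 -c:v libvpx -b:v 1M -crf 10 output.webm
-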
-
For more information about libvpx see:
-http://www.webmproject.org/
-
-
-
-
9.3 libwebp# TOC
-
-
libwebp WebP Image encoder wrapper
-
-
libwebp is Google’s official encoder for WebP images. It can encode in either
-lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
-frame. Lossless images are a separate codec developed by Google.
-
-
-
9.3.1 Pixel Format# TOC
-
-
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
-to limitations of the format and libwebp. Alpha is supported for either mode.
-Because of API limitations, if RGB is passed in when encoding lossy or YUV is
-passed in for encoding lossless, the pixel format will automatically be
-converted using functions from libwebp. This is not ideal and is done only for
-convenience.
-
-
-
9.3.2 Options# TOC
-
-
--lossless boolean
-Enables/Disables use of lossless mode. Default is 0.
-
-
--compression_level integer
-For lossy, this is a quality/speed tradeoff. Higher values give better quality
-for a given size at the cost of increased encoding time. For lossless, this is
-a size/speed tradeoff. Higher values give smaller size at the cost of increased
-encoding time. More specifically, it controls the number of extra algorithms
-and compression tools used, and varies the combination of these tools. This
-maps to the method option in libwebp. The valid range is 0 to 6.
-Default is 4.
-
-
--qscale float
-For lossy encoding, this controls image quality, 0 to 100. For lossless
-encoding, this controls the effort and time spent at compressing more. The
-default value is 75. Note that for usage via libavcodec, this option is called
-global_quality and must be multiplied by FF_QP2LAMBDA .
-
-
--preset type
-Configuration preset. This does some automatic settings based on the general
-type of the image.
-
-none
-Do not use a preset.
-
-default
-Use the encoder default.
-
-picture
-Digital picture, like portrait, inner shot
-
-photo
-Outdoor photograph, with natural lighting
-
-drawing
-Hand or line drawing, with high-contrast details
-
-icon
-Small-sized colorful images
-
-text
-Text-like
-
-
-
-
-
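-
-For example, the following command should produce a lossless WebP image at
-the slowest, smallest compression setting (file names are placeholders):
-
-
ffmpeg -i input.png -c:v libwebp -lossless 1 -compression_level 6 output.webp
-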
-
-
-
9.4 libx264, libx264rgb# TOC
-
-
x264 H.264/MPEG-4 AVC encoder wrapper.
-
-
This encoder requires the presence of the libx264 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx264
.
-
-
libx264 supports an impressive number of features, including 8x8 and
-4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
-entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
-for detail retention (adaptive quantization, psy-RD, psy-trellis).
-
-
Many libx264 encoder options are mapped to FFmpeg global codec
-options, while unique encoder options are provided through private
-options. Additionally the x264opts and x264-params
-private options allow one to pass a list of key=value tuples as accepted
-by the libx264 x264_param_parse
function.
-
-
The x264 project website is at
-http://www.videolan.org/developers/x264.html .
-
-
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
-pixel formats as input instead of YUV.
-
-
-
9.4.1 Supported Pixel Formats# TOC
-
-
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
-x264’s configure time. FFmpeg only supports one bit depth in one particular
-build. In other words, it is not possible to build one FFmpeg with multiple
-versions of x264 with different bit depths.
-
-
-
9.4.2 Options# TOC
-
-
-The following options are supported by the libx264 wrapper. The
-x264-equivalent options or values are listed in parentheses
-for easy migration.
-
-
To reduce the duplication of documentation, only the private options
-and some others requiring special attention are documented here. For
-the documentation of the undocumented generic options, see
-the Codec Options chapter .
-
-
To get a more accurate and extensive documentation of the libx264
-options, invoke the command x264 --full-help
or consult
-the libx264 documentation.
-
-
-b (bitrate )
-Set bitrate in bits/s. Note that FFmpeg’s b option is
-expressed in bits/s, while x264
’s bitrate is in
-kilobits/s.
-
-
-bf (bframes )
-g (keyint )
-qmin (qpmin )
-Minimum quantizer scale.
-
-
-qmax (qpmax )
-Maximum quantizer scale.
-
-
-qdiff (qpstep )
-Maximum difference between quantizer scales.
-
-
-qblur (qblur )
-Quantizer curve blur
-
-
-qcomp (qcomp )
-Quantizer curve compression factor
-
-
-refs (ref )
-Number of reference frames each P-frame can use. The range is from 0 to 16.
-
-
-sc_threshold (scenecut )
-Sets the threshold for the scene change detection.
-
-
-trellis (trellis )
-Performs Trellis quantization to increase efficiency. Enabled by default.
-
-
-nr (nr )
-me_range (merange )
-Maximum range of the motion search in pixels.
-
-
-me_method (me )
-Set motion estimation method. Possible values in the decreasing order
-of speed:
-
-
-‘dia (dia ) ’
-‘epzs (dia ) ’
-Diamond search with radius 1 (fastest). ‘epzs ’ is an alias for
-‘dia ’.
-
-‘hex (hex ) ’
-Hexagonal search with radius 2.
-
-‘umh (umh ) ’
-Uneven multi-hexagon search.
-
-‘esa (esa ) ’
-Exhaustive search.
-
-‘tesa (tesa ) ’
-Hadamard exhaustive search (slowest).
-
-
-
-
-subq (subme )
-Sub-pixel motion estimation method.
-
-
-b_strategy (b-adapt )
-Adaptive B-frame placement decision algorithm. Use only on first-pass.
-
-
-keyint_min (min-keyint )
-Minimum GOP size.
-
-
-coder
-Set entropy encoder. Possible values:
-
-
-‘ac ’
-Enable CABAC.
-
-
-‘vlc ’
-Enable CAVLC and disable CABAC. It generates the same effect as
-x264
’s --no-cabac option.
-
-
-
-
-cmp
-Set full pixel motion estimation comparison algorithm. Possible values:
-
-
-‘chroma ’
-Enable chroma in motion estimation.
-
-
-‘sad ’
-Ignore chroma in motion estimation. It generates the same effect as
-x264
’s --no-chroma-me option.
-
-
-
-
-threads (threads )
-Number of encoding threads.
-
-
-thread_type
-Set multithreading technique. Possible values:
-
-
-‘slice ’
-Slice-based multithreading. It generates the same effect as
-x264
’s --sliced-threads option.
-
-‘frame ’
-Frame-based multithreading.
-
-
-
-
-flags
-Set encoding flags. It can be used to disable closed GOP and enable
-open GOP by setting it to -cgop
. The result is similar to
-the behavior of x264
’s --open-gop option.
-
-
-rc_init_occupancy (vbv-init )
-preset (preset )
-Set the encoding preset.
-
-
-tune (tune )
-Set tuning of the encoding params.
-
-
-profile (profile )
-Set profile restrictions.
-
-
-fastfirstpass
-Enable fast settings when encoding first pass, when set to 1. When set
-to 0, it has the same effect of x264
’s
---slow-firstpass option.
-
-
-crf (crf )
-Set the quality for constant quality mode.
-
-
-crf_max (crf-max )
-In CRF mode, prevents VBV from lowering quality beyond this point.
-
-
-qp (qp )
-Set constant quantization rate control method parameter.
-
-
-aq-mode (aq-mode )
-Set AQ method. Possible values:
-
-
-‘none (0 ) ’
-Disabled.
-
-
-‘variance (1 ) ’
-Variance AQ (complexity mask).
-
-
-‘autovariance (2 ) ’
-Auto-variance AQ (experimental).
-
-
-
-
-aq-strength (aq-strength )
-Set AQ strength, reduce blocking and blurring in flat and textured areas.
-
-
-psy
-Use psychovisual optimizations when set to 1. When set to 0, it has the
-same effect as x264
’s --no-psy option.
-
-
-psy-rd (psy-rd )
-Set strength of psychovisual optimization, in
-psy-rd :psy-trellis format.
-
-
-rc-lookahead (rc-lookahead )
-Set number of frames to look ahead for frametype and ratecontrol.
-
-
-weightb
-Enable weighted prediction for B-frames when set to 1. When set to 0,
-it has the same effect as x264
’s --no-weightb option.
-
-
-weightp (weightp )
-Set weighted prediction method for P-frames. Possible values:
-
-
-‘none (0 ) ’
-Disabled
-
-‘simple (1 ) ’
-Enable only weighted refs
-
-‘smart (2 ) ’
-Enable both weighted refs and duplicates
-
-
-
-
-ssim (ssim )
-Enable calculation and printing SSIM stats after the encoding.
-
-
-intra-refresh (intra-refresh )
-Enable the use of Periodic Intra Refresh instead of IDR frames when set
-to 1.
-
-
-avcintra-class (class )
-Configure the encoder to generate AVC-Intra.
-Valid values are 50, 100 and 200.
-
-
-bluray-compat (bluray-compat )
-Configure the encoder to be compatible with the Blu-ray standard.
-It is a shorthand for setting "bluray-compat=1 force-cfr=1".
-
-
-b-bias (b-bias )
-Set the influence on how often B-frames are used.
-
-
-b-pyramid (b-pyramid )
-Set method for keeping of some B-frames as references. Possible values:
-
-
-‘none (none ) ’
-Disabled.
-
-‘strict (strict ) ’
-Strictly hierarchical pyramid.
-
-‘normal (normal ) ’
-Non-strict (not Blu-ray compatible).
-
-
-
-
-mixed-refs
-Enable the use of one reference per partition, as opposed to one
-reference per macroblock when set to 1. When set to 0, it has the
-same effect as x264
’s --no-mixed-refs option.
-
-
-8x8dct
-Enable adaptive spatial transform (high profile 8x8 transform)
-when set to 1. When set to 0, it has the same effect as
-x264
’s --no-8x8dct option.
-
-
-fast-pskip
-Enable early SKIP detection on P-frames when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-fast-pskip option.
-
-
-aud (aud )
-Enable use of access unit delimiters when set to 1.
-
-
-mbtree
-Enable use of macroblock tree ratecontrol when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-mbtree option.
-
-
-deblock (deblock )
-Set loop filter parameters, in alpha :beta form.
-
-
-cplxblur (cplxblur )
-Set fluctuations reduction in QP (before curve compression).
-
-
-partitions (partitions )
-Set partitions to consider as a comma-separated list. Possible
-values in the list:
-
-
-‘p8x8 ’
-8x8 P-frame partition.
-
-‘p4x4 ’
-4x4 P-frame partition.
-
-‘b8x8 ’
-8x8 B-frame partition.
-
-‘i8x8 ’
-8x8 I-frame partition.
-
-‘i4x4 ’
-4x4 I-frame partition.
-(Enabling ‘p4x4 ’ requires ‘p8x8 ’ to be enabled. Enabling
-‘i8x8 ’ requires adaptive spatial transform (8x8dct
-option) to be enabled.)
-
-‘none (none ) ’
-Do not consider any partitions.
-
-‘all (all ) ’
-Consider every partition.
-
-
-
-
-direct-pred (direct )
-Set direct MV prediction mode. Possible values:
-
-
-‘none (none ) ’
-Disable MV prediction.
-
-‘spatial (spatial ) ’
-Enable spatial predicting.
-
-‘temporal (temporal ) ’
-Enable temporal predicting.
-
-‘auto (auto ) ’
-Automatically decided.
-
-
-
-
-slice-max-size (slice-max-size )
-Set the limit of the size of each slice in bytes. If not specified
-but RTP payload size (ps ) is specified, that is used.
-
-
-stats (stats )
-Set the file name for multi-pass stats.
-
-
-nal-hrd (nal-hrd )
-Set signal HRD information (requires vbv-bufsize to be set).
-Possible values:
-
-
-‘none (none ) ’
-Disable HRD information signaling.
-
-‘vbr (vbr ) ’
-Variable bit rate.
-
-‘cbr (cbr ) ’
-Constant bit rate (not allowed in MP4 container).
-
-
-
-
-x264opts (N.A.)
-Set any x264 option, see x264 --fullhelp
for a list.
-
-Argument is a list of key =value couples separated by
-":". In filter and psy-rd options that use ":" as a separator
-themselves, use "," instead. x264 has accepted "," in these options for a
-long time, but this is kept undocumented for some reason.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
-
-
-
-x264-params (N.A.)
-Override the x264 configuration using a :-separated list of key=value
-parameters.
-
-This option is functionally the same as the x264opts , but is
-duplicated for compatibility with the Libav fork.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
-cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
-no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
-
-
-
-
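-
-As an illustration, the following command should produce a constant quality
-H.264 encode using the preset and crf options documented above, while
-copying the audio untouched (file names are placeholders):
-
-
ffmpeg -i input.mkv -c:v libx264 -preset slow -crf 22 -c:a copy output.mkv
-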
-
Encoding ffpresets for common usages are provided so they can be used with the
-general presets system (e.g. passing the pre option).
-
-
-
9.5 libx265# TOC
-
-
x265 H.265/HEVC encoder wrapper.
-
-
This encoder requires the presence of the libx265 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx265 .
-
-
-
9.5.1 Options# TOC
-
-
-preset
-Set the x265 preset.
-
-
-tune
-Set the x265 tune parameter.
-
-
-x265-params
-Set x265 options using a list of key =value couples separated
-by ":". See x265 --help
for a list of options.
-
-For example to specify libx265 encoding options with -x265-params :
-
-
-
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
-
-
-
-
-
-
9.6 libxvid# TOC
-
-
Xvid MPEG-4 Part 2 encoder wrapper.
-
-
This encoder requires the presence of the libxvidcore headers and library
-during configuration. You need to explicitly configure the build with
---enable-libxvid --enable-gpl
.
-
-
The native mpeg4
encoder supports the MPEG-4 Part 2 format, so
-users can encode to this format without this library.
-
-
-
9.6.1 Options# TOC
-
-
The following options are supported by the libxvid wrapper. Some of
-the following options are listed but are not documented, and
-correspond to shared codec options. See the Codec
-Options chapter for their documentation. The other shared options
-which are not listed have no effect for the libxvid encoder.
-
-
-b
-g
-qmin
-qmax
-mpeg_quant
-threads
-bf
-b_qfactor
-b_qoffset
-flags
-Set specific encoding flags. Possible values:
-
-
-‘mv4 ’
-Use four motion vectors per macroblock.
-
-
-‘aic ’
-Enable high quality AC prediction.
-
-
-‘gray ’
-Only encode grayscale.
-
-
-‘gmc ’
-Enable the use of global motion compensation (GMC).
-
-
-‘qpel ’
-Enable quarter-pixel motion compensation.
-
-
-‘cgop ’
-Enable closed GOP.
-
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-
-
-
-
-trellis
-me_method
-Set motion estimation method. Possible values in decreasing order of
-speed and increasing order of quality:
-
-
-‘zero ’
-Use no motion estimation (default).
-
-
-‘phods ’
-‘x1 ’
-‘log ’
-Enable advanced diamond zonal search for 16x16 blocks and half-pixel
-refinement for 16x16 blocks. ‘x1 ’ and ‘log ’ are aliases for
-‘phods ’.
-
-
-‘epzs ’
-Enable all of the things described above, plus advanced diamond zonal
-search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
-estimation on chroma planes.
-
-
-‘full ’
-Enable all of the things described above, plus extended 16x16 and 8x8
-blocks search.
-
-
-
-
-mbd
-Set macroblock decision algorithm. Possible values in the increasing
-order of quality:
-
-
-‘simple ’
-Use macroblock comparing function algorithm (default).
-
-
-‘bits ’
-Enable rate distortion-based half pixel and quarter pixel refinement for
-16x16 blocks.
-
-
-‘rd ’
-Enable all of the things described above, plus rate distortion-based
-half pixel and quarter pixel refinement for 8x8 blocks, and rate
-distortion-based search using square pattern.
-
-
-
-
-lumi_aq
-Enable lumi masking adaptive quantization when set to 1. Default is 0
-(disabled).
-
-
-variance_aq
-Enable variance adaptive quantization when set to 1. Default is 0
-(disabled).
-
-When combined with lumi_aq , the resulting quality will not
-be better than either of the two specified individually. In other
-words, the resulting quality will be the worse one of the two
-effects.
-
-
-ssim
-Set structural similarity (SSIM) displaying method. Possible values:
-
-
-‘off ’
-Disable displaying of SSIM information.
-
-
-‘avg ’
-Output average SSIM at the end of encoding to stdout. The format of
-showing the average SSIM is:
-
Average SSIM: %f
-
-For users who are not familiar with C, %f means a float number, or
-a decimal (e.g. 0.939232).
-
-
-‘frame ’
-Output both per-frame SSIM data during encoding and average SSIM at
-the end of encoding to stdout. The format of per-frame information
-is:
-
-
-
SSIM: avg: %1.3f min: %1.3f max: %1.3f
-
-
-For users who are not familiar with C, %1.3f means a float number
-rounded to 3 digits after the dot (e.g. 0.932).
-
-
-
-
-
-ssim_acc
-Set SSIM accuracy. Valid options are integers within the range of
-0-4, while 0 gives the most accurate result and 4 computes the
-fastest.
-
-
-
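-
-For example, the following command should encode MPEG-4 Part 2 at 1200 kb/s
-with four motion vectors per macroblock and high quality AC prediction
-enabled (file names are placeholders):
-
-
ffmpeg -i input.mkv -c:v libxvid -b:v 1200k -flags +mv4+aic output.avi
-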
-
-
-
9.7 mpeg2# TOC
-
-
MPEG-2 video encoder.
-
-
-
9.7.1 Options# TOC
-
-
-seq_disp_ext integer
-Specifies if the encoder should write a sequence_display_extension to the
-output.
-
--1
-auto
-Decide automatically to write it or not (this is the default) by checking if
-the data to be written is different from the default or unspecified values.
-
-0
-never
-Never write it.
-
-1
-always
-Always write it.
-
-
-
-
-
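-
-As an illustration, the following command should encode MPEG-2 video at
-5 Mb/s and always write the sequence display extension (value 1 as listed
-above); the encoder is assumed to be selected under the name mpeg2video,
-and the file names are placeholders:
-
-
ffmpeg -i input.mov -c:v mpeg2video -b:v 5000k -seq_disp_ext 1 output.mpg
-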
-
-
9.8 png# TOC
-
PNG image encoder.
-
-
-
9.8.1 Private options# TOC
-
-
-dpi integer
-Set physical density of pixels, in dots per inch, unset by default
-
-dpm integer
-Set physical density of pixels, in dots per meter, unset by default
-
-
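-
-For example, the following command should write a single PNG frame tagged
-with a physical density of 300 dots per inch via the private option above
-(file names are placeholders):
-
-
ffmpeg -i input.mp4 -frames:v 1 -c:v png -dpi 300 output.png
-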
-
-
-
9.9 ProRes# TOC
-
-
Apple ProRes encoder.
-
-
FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
-The encoder to use can be chosen with the -vcodec option.
-
-
-
9.9.1 Private Options for prores-ks# TOC
-
-
-profile integer
-Select the ProRes profile to encode
-
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-‘4444 ’
-
-
-
-quant_mat integer
-Select quantization matrix.
-
-‘auto ’
-‘default ’
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-
-If set to auto , the matrix matching the profile will be picked.
-If not set, the matrix providing the highest quality, default , will be
-picked.
-
-
-bits_per_mb integer
-How many bits to allot for coding one macroblock. Different profiles use
-between 200 and 2400 bits per macroblock; the maximum is 8000.
-
-
-mbs_per_slice integer
-Number of macroblocks in each slice (1-8); the default value (8)
-should be good in almost all situations.
-
-
-vendor string
-Override the 4-byte vendor ID.
-A custom vendor ID like apl0 would claim the stream was produced by
-the Apple encoder.
-
-
-alpha_bits integer
-Specify number of bits for alpha component.
-Possible values are 0 , 8 and 16 .
-Use 0 to disable alpha plane coding.
-
-
-
-
-
-
9.9.2 Speed considerations# TOC
-
-
In the default mode of operation the encoder has to honor frame constraints
-(i.e. not produce frames with a size bigger than requested) while still making
-the output picture as good as possible.
-A frame containing a lot of small details is harder to compress and the encoder
-would spend more time searching for appropriate quantizers for each slice.
-
-
Setting a higher bits_per_mb limit will improve the speed.
-
-
For the fastest encoding speed set the qscale parameter (4 is the
-recommended value) and do not set a size constraint.
-
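-
-As an illustration, the following command should select the prores_ks
-encoder with the HQ profile and the recommended qscale value (file names are
-placeholders):
-
-
ffmpeg -i input.mov -c:v prores_ks -profile:v hq -q:v 4 output.mov
-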
-
-
-
10 Subtitles Encoders# TOC
-
-
-
10.1 dvdsub# TOC
-
-
This codec encodes the bitmap subtitle format that is used in DVDs.
-Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
-and they can also be used in Matroska files.
-
-
-
10.1.1 Options# TOC
-
-
-even_rows_fix
-When set to 1, enable a work-around that makes the number of pixel rows
-even in all subtitles. This fixes a problem with some players that
-cut off the bottom row if the number is odd. The work-around just adds
-a fully transparent row if needed. The overhead is low, typically
-one byte per subtitle on average.
-
-By default, this work-around is disabled.
-
-
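-
-Assuming the input already carries a bitmap subtitle track, a command
-roughly like the following should re-encode it to dvdsub in a Matroska file
-with the work-around above enabled (file names are placeholders):
-
-
ffmpeg -i input.mkv -map 0 -c:v copy -c:a copy -c:s dvdsub -even_rows_fix 1 output.mkv
-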
-
-
-
-
11 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavcodec
-
-
-
-
12 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-devices.html b/Externals/ffmpeg/dev/doc/ffmpeg-devices.html
deleted file mode 100644
index a460bd1369..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-devices.html
+++ /dev/null
@@ -1,1810 +0,0 @@
-
-
-
-
-
-
- FFmpeg Devices Documentation
-
-
-
-
-
-
-
-
- FFmpeg Devices Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the input and output devices provided by the
-libavdevice library.
-
-
-
-
2 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
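-
-For example, the following command should set private options of the alsa
-input device directly on the ffmpeg command line (the device name and output
-file name are placeholders):
-
-
ffmpeg -f alsa -sample_rate 48000 -channels 2 -i hw:0 out.wav
-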
-
-
-
3 Input Devices# TOC
-
-
-Input devices are configured elements in FFmpeg which allow accessing
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-
You can disable all the input devices using the configure option
-"–disable-indevs", and selectively enable an input device using the
-option "–enable-indev=INDEV ", or you can disable a particular
-input device using the option "–disable-indev=INDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
3.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD [,DEV [,SUBDEV ]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg
from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
3.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the framework currently recommended by Apple for stream grabbing on OS X >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the latter selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-
- -video_device_index <INDEX>
-
-and/or
-
- -audio_device_index <INDEX>
-
-, overriding any
-device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
3.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
3.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
3.3 bktr# TOC
-
-
BSD video input device.
-
-
-
3.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronization between them.
-
-
The input name should be in the format:
-
-
-
TYPE=NAME[:TYPE=NAME]
where TYPE can be either audio or video ,
-and NAME is the device’s name.
-
-
-
3.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
3.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
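A further hedged sketch, combining several of the options listed above on a
hypothetical device named Camera (all values are placeholders):

$ ffmpeg -f dshow -video_size 1280x720 -framerate 30 -i video="Camera" out.mp4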
-
-
-
-
3.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
3.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
3.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
-
desktop
or
title=window_title
-
-
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
3.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0
to
-not draw the pointer. Default value is 1
.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
3.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
3.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
3.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
-
-
-
-
-
-
3.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg
.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
3.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
3.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
3.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
3.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
3.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
3.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
3.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
3.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string '' as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
3.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg
use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
3.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default".
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
3.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the sample rate in Hz, by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
3.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
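A hedged variant that also sets some of the private options listed above
(the values are illustrative only):

ffmpeg -f pulse -sample_rate 44100 -channels 1 -i default /tmp/pulse_mono.wav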
-
-
-
3.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
3.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg
use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
3.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node. Linux systems
-tend to automatically create such nodes when the device
-(e.g. a USB webcam) is plugged into the system; the node has a name of the
-kind /dev/videoN , where N is a number associated with
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-width xheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
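For instance (the device node is a placeholder), the formats supported by a
webcam could be inspected with:

ffmpeg -f video4linux2 -list_formats all -i /dev/video0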
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
3.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Default to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
3.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
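
For example (a sketch; driver number 0 and the output name are placeholders),
you could list the drivers and then grab from the first one with:

ffmpeg -f vfwcap -i list
ffmpeg -f vfwcap -framerate 25 -i 0 out.avi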
-
-
3.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname :display_number .screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo
program for getting basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
3.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0
specifies
-not to draw the pointer. Default value is 1
.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered
or a number of pixels PIXELS .
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of region; otherwise, the region
-follows only when the mouse pointer comes within PIXELS (greater than
-zero) of the edge of the region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer comes within 100 pixels of the edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga
.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1
.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
3.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz and the number
-of channels currently is limited to 2 (stereo).
-
-
-
3.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
3.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
4 Output Devices# TOC
-
-
Output devices are configured elements in FFmpeg that can write
-multimedia data to an output device attached to your system.
-
-
When you configure your FFmpeg build, all the supported output devices
-are enabled by default. You can list all available ones using the
-configure option "–list-outdevs".
-
-
You can disable all the output devices using the configure option
-"–disable-outdevs", and selectively enable an output device using the
-option "–enable-outdev=OUTDEV ", or you can disable a particular
-output device using the option "–disable-outdev=OUTDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-enabled output devices.
-
-
A description of the currently available output devices follows.
-
-
-
4.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) output device.
-
-
-
4.1.1 Examples# TOC
-
-
- Play a file on default ALSA device:
-
-
ffmpeg -i INPUT -f alsa default
-
-
- Play a file on soundcard 1, audio device 7:
-
-
ffmpeg -i INPUT -f alsa hw:1,7
-
-
-
-
-
4.2 caca# TOC
-
-
CACA output device.
-
-
This output device allows one to show a video stream in a CACA window.
-Only one CACA window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need to configure FFmpeg with
---enable-libcaca
.
-libcaca is a graphics library that outputs text instead of pixels.
-
-
For more information about libcaca, check:
-http://caca.zoy.org/wiki/libcaca
-
-
-
4.2.1 Options# TOC
-
-
-window_title
-Set the CACA window title. If not specified it defaults to the filename
-specified for the output device.
-
-
-window_size
-Set the CACA window size, can be a string of the form
-width xheight or a video size abbreviation.
-If not specified it defaults to the size of the input video.
-
-
-driver
-Set display driver.
-
-
-algorithm
-Set dithering algorithm. Dithering is necessary
-because the picture being rendered has usually far more colours than
-the available palette.
-The accepted values are listed with -list_dither algorithms
.
-
-
-antialias
-Set antialias method. Antialiasing smoothens the rendered
-image and avoids the commonly seen staircase effect.
-The accepted values are listed with -list_dither antialiases
.
-
-
-charset
-Set which characters are going to be used when rendering text.
-The accepted values are listed with -list_dither charsets
.
-
-
-color
-Set color to be used when rendering text.
-The accepted values are listed with -list_dither colors
.
-
-
-list_drivers
-If set to true , print a list of available drivers and exit.
-
-
-list_dither
-List available dither options related to the argument.
-The argument must be one of algorithms
, antialiases
,
-charsets
, colors
.
-
-
-
-
-
4.2.2 Examples# TOC
-
-
- The following command shows the ffmpeg
output in a
-CACA window, forcing its size to 80x25:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
-
-
- Show the list of available drivers and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
-
-
- Show the list of available dither colors and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
-
-
-
-
-
4.3 decklink# TOC
-
-
The decklink output device provides playback capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this output device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz.
-
-
-
4.3.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-preroll
-Amount of time to preroll video in seconds.
-Defaults to 0.5 .
-
-
-
-
-
-
4.3.2 Examples# TOC
-
-
- List output devices:
-
-
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
-
-
- List supported formats:
-
-
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
-
-
- Play video clip:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
-
-
- Play video clip with non-standard framerate or video size:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
-
-
-
-
-
-
4.4 fbdev# TOC
-
-
Linux framebuffer output device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
-
4.4.1 Options# TOC
-
-xoffset
-yoffset
-Set x/y coordinate of top left corner. Default is 0.
-
-
-
-
-
4.4.2 Examples# TOC
-
Play a file on framebuffer device /dev/fb0 .
-Required pixel format depends on current framebuffer settings.
-
-
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
-
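A hedged variant that also positions the picture inside the framebuffer using
the offset options described above (the offsets are illustrative only):

ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -xoffset 20 -yoffset 40 -f fbdev /dev/fb0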
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
4.5 opengl# TOC
-
OpenGL output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-opengl
.
-
-
This output device allows one to render to OpenGL context.
-Context may be provided by application or default SDL window is created.
-
-
When the device renders to an external context, the application must implement handlers for the following messages:
-AV_DEV_TO_APP_CREATE_WINDOW_BUFFER
- create OpenGL context on current thread.
-AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER
- make OpenGL context current.
-AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER
- swap buffers.
-AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER
- destroy OpenGL context.
-The application is also required to inform the device about the current resolution by sending the AV_APP_TO_DEV_WINDOW_SIZE
message.
-
-
-
4.5.1 Options# TOC
-
-background
-Set background color. Black is a default.
-
-no_window
-Disables default SDL window when set to non-zero value.
-Application must provide OpenGL context and both window_size_cb
and window_swap_buffers_cb
callbacks when set.
-
-window_title
-Set the SDL window title. If not specified it defaults to the filename specified for the output device.
-Ignored when no_window is set.
-
-window_size
-Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
-If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
-Mostly usable when no_window is not set.
-
-
-
-
-
-
4.5.2 Examples# TOC
-
Play a file on SDL window using OpenGL rendering:
-
-
ffmpeg -i INPUT -f opengl "window title"
-
-
-
-
-
4.6 oss# TOC
OSS (Open Sound System) output device.
-
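
For example (a sketch; the device node is the common default and may differ
on your system), play a file on the OSS device /dev/dsp :

ffmpeg -i INPUT -f oss /dev/dsp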
-
-
4.7 pulse# TOC
-
-
PulseAudio output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org
-
-
-
4.7.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is set to the specified output name.
-
-
-device
-Specify the device to use. Default device is used when not provided.
-List of output devices can be obtained with command pactl list sinks
.
-
-
-buffer_size
-buffer_duration
-Control the size and duration of the PulseAudio buffer. A small buffer
-gives more control, but requires more frequent updates.
-
-buffer_size specifies size in bytes while
-buffer_duration specifies duration in milliseconds.
-
-When both options are provided then the highest value is used
-(duration is recalculated to bytes using stream parameters). If they
-are set to 0 (which is default), the device will use the default
-PulseAudio duration value. By default PulseAudio sets the buffer duration
-to around 2 seconds.
-
-
-prebuf
-Specify pre-buffering size in bytes. The server does not start with
-playback before at least prebuf bytes are available in the
-buffer. By default this option is initialized to the same value as
-buffer_size or buffer_duration (whichever is bigger).
-
-
-minreq
-Specify minimum request size in bytes. The server does not request less
-than minreq bytes from the client, instead waits until the buffer
-is free enough to request more bytes at once. It is recommended to not set
-this option, which will initialize this to a value that is deemed sensible
-by the server.
-
-
-
-
-
-
4.7.2 Examples# TOC
-
Play a file on default device on default server:
-
-
ffmpeg -i INPUT -f pulse "stream name"
-
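A hedged variant requesting a smaller buffer for lower latency (the duration
value is only an illustration):

ffmpeg -i INPUT -f pulse -buffer_duration 60 "low latency stream"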
-
-
-
-
4.8 sdl# TOC
SDL (Simple DirectMedia Layer) output device.
-
-
This output device allows one to show a video stream in an SDL
-window. Only one SDL window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need libsdl installed on your system
-when configuring your build.
-
-
For more information about SDL, check:
-http://www.libsdl.org/
-
-
-
4.8.1 Options# TOC
-
-
-window_title
-Set the SDL window title. If not specified it defaults to the filename
-specified for the output device.
-
-
-icon_title
-Set the name of the iconified SDL window, if not specified it is set
-to the same value of window_title .
-
-
-window_size
-Set the SDL window size, can be a string of the form
-width xheight or a video size abbreviation.
-If not specified it defaults to the size of the input video,
-downscaled according to the aspect ratio.
-
-
-window_fullscreen
-Set fullscreen mode when non-zero value is provided.
-Default value is zero.
-
-
-
-
-
4.8.2 Interactive commands# TOC
-
-
The window created by the device can be controlled through the
-following interactive commands.
-
-
-q, ESC
-Quit the device immediately.
-
-
-
-
-
4.8.3 Examples# TOC
-
-
The following command shows the ffmpeg
output in an
-SDL window, forcing its size to the qcif format:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
-
-
-
-
4.9 sndio# TOC
-
-
sndio audio output device.
-
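
For example (a sketch; the device node mirrors the sndio input device
described earlier and may differ on your system):

ffmpeg -i INPUT -f sndio /dev/audio0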
-
-
-
4.10 xv# TOC
XV (XVideo) output device.
-
-
This output device allows one to show a video stream in an X Window System
-window.
-
-
-
4.10.1 Options# TOC
-
-
-display_name
-Specify the hardware display name, which determines the display and
-communications domain to be used.
-
-The display name or DISPLAY environment variable can be a string in
-the format hostname [:number [.screen_number ]].
-
-hostname specifies the name of the host machine on which the
-display is physically attached. number specifies the number of
-the display server on that host machine. screen_number specifies
-the screen to be used on that server.
-
-If unspecified, it defaults to the value of the DISPLAY environment
-variable.
-
-For example, dual-headed:0.1
would specify screen 1 of display
-0 on the machine named “dual-headed”.
-
-Check the X11 specification for more detailed information about the
-display name format.
-
-
-window_id
-When set to a non-zero value, the device does not create a new window,
-but uses the existing one with the provided window_id . By default
-this option is set to zero and the device creates its own window.
-
-
-window_size
-Set the created window size, can be a string of the form
-width xheight or a video size abbreviation. If not
-specified it defaults to the size of the input video.
-Ignored when window_id is set.
-
-
-window_x
-window_y
-Set the X and Y window offsets for the created window. They are both
-set to 0 by default. The values may be ignored by the window manager.
-Ignored when window_id is set.
-
-
-window_title
-Set the window title. If not specified it defaults to the filename
-specified for the output device. Ignored when window_id is set.
-
-
-
-
For more information about XVideo see http://www.x.org/ .
-
-
-
4.10.2 Examples# TOC
-
-
- Decode, display and encode video input with ffmpeg
at the
-same time:
-
-
ffmpeg -i INPUT OUTPUT -f xv display
-
-
- Decode and display the input video to multiple X11 windows:
-
-
ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
-
-
-
-
-
-
5 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavdevice
-
-
-
-
6 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-filters.html b/Externals/ffmpeg/dev/doc/ffmpeg-filters.html
deleted file mode 100644
index b0373dc55a..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-filters.html
+++ /dev/null
@@ -1,13382 +0,0 @@
-
-
-
-
-
-
- FFmpeg Filters Documentation
-
-
-
-
-
-
-
-
- FFmpeg Filters Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes filters, sources, and sinks provided by the
-libavfilter library.
-
-
-
-
2 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half part of the video, and then vertically flipped. The
-overlay filter takes in input the first unchanged output of the
-split filter (which was labelled as [main] ), and overlay on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take in input a list of parameters: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
3 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
-
tools/graph2dot -h
to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
4 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
4.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance of, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments ".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value . In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade
filter
-declares three options in this order – type , start_frame and
-nb_frames . Then the parameter list in:0:30 means that the value
-in is assigned to the option type , 0 to
-start_frame and 30 to nb_frames .
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct value must precede the key=value pairs, and
-follow the same constraints order of the previous point. The following
-key=value pairs can be set in any preferred order.
-
-
-
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
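For instance, a hedged sketch forcing bicubic scaling for any automatically
inserted scaler (the filter arguments are illustrative only):

ffmpeg -i INPUT -vf "sws_flags=bicubic;scale=640:360" OUTPUT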
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
4.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to
-separate values, or one of the escaping characters \'
.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \'
or the special
-characters [],;
used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the '
special escaping character, and the
-:
special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \'
escaping special characters,
-also ,
needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\
is special and needs to be escaped with another \
, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
5 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
6 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
6.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channel are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If number of given delays is
-smaller than number of channels all remaining channels will not be delayed.
-
-
-
-
-
6.1.1 Examples# TOC
-
-
- Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
adelay=1500|0|500
-
-
-
6.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
6.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
aecho=0.8:0.88:60:0.4
-
- If the delay is very short, then it sounds like a (metallic) robot playing music:
-
aecho=0.8:0.88:6:0.4
-
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
6.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
6.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
aeval=val(0)|-val(1)
-
-
-
6.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type; it can be either in for fade-in, or
-out for a fade-out effect. Default is in.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
6.4.1 Examples# TOC
-
-
- Fade in first 15 seconds of audio:
-
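-For example, using the type and duration options described above:
-
afade=t=in:ss=0:d=15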
-
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
6.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
6.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
6.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
On the other hand, if both input are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate, and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
6.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
6.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
6.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
6.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
-This can be used together with ffmpeg -shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
6.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
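-For example, using the pad_len option:
-
apad=pad_len=1024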
-
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
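-For example, using the whole_len option:
-
apad=whole_len=10000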
-
- Use ffmpeg to pad the audio input with silence, so that the
-video stream will always be the shortest and will be converted
-until the end in the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
6.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
A phaser filter creates series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
-
-
6.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
6.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
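-For example, passing just the sample rate:
-
aresample=44100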
-
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
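-One way to express this is through the async resampler option (see the ffmpeg-resampler manual):
-
aresample=async=1000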
-
-
-
-
6.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel .
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
6.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
-
-
-
6.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information in
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
-
6.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10].
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
-
-
6.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
6.17.1 Examples# TOC
-
-
Stress-test amerge by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
6.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
6.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
6.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
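-For example:
-
atempo=0.8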
-
- To speed up audio to 125% tempo:
-
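-For example:
-
atempo=1.25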
-
-
-
-
6.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
6.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
6.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
6.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
-
6.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
-
-
6.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
-
-
-
6.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel-out_channel or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
6.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
6.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|.... or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0 is assumed but
-may be overridden (by 0/out-dBn). Typical values for the transfer
-function are -70/-70|-60/-20.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
6.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
6.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
6.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
6.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
6.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
-
-
-
6.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
-
-
6.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
6.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/, /usr/local/lib/ladspa/,
-/usr/lib/ladspa/.
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
6.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
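-For example (assuming the amp library is installed in one of the LADSPA paths):
-
ladspa=file=amp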
-
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit (CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome from the
-C* Audio Plugin Suite (CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
6.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and prior one is kept.
-
-
-
-
-
6.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
-
-
6.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
6.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
Note that ffmpeg integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
6.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- only one input per channel output,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
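-One way to write this (explicitly zeroing the first output channel):
-
pan="stereo| c0=0*c0 | c1=c1"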
-
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
6.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At the end of filtering it displays track_gain and track_peak.
-
-
-
6.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
6.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-than or equal to a noise tolerance value for a duration greater than or equal to the
-minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
6.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg
to detect silence with 0.0001 noise
-tolerance in silence.mp3 :
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
6.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from the beginning of audio
-the start_periods will be 1 but it can be increased to higher
-values to trim all audio up to a specific count of non-silence periods.
-Default value is 0.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0 may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-For example, if you want to remove long pauses between words but do not want
-to remove the pauses completely. Default value is 0.
-
-
-
-
-
-
6.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
-
-
6.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
-
6.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
6.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
6.42.2 Examples# TOC
-
-
-
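-A couple of basic invocations using the options described above:
-
- Halve the input audio volume:
-
-
volume=volume=0.5
-
-
- Increase input audio power by 6 decibels using fixed-point precision:
-
-
volume=volume=6dB:precision=fixed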
-
-
6.43 volumedetect# TOC
-
-
Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the input stream end is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
6.43.1 Examples# TOC
-
-
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
7 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
7.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
7.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
7.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts as input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
7.2.1 Examples# TOC
-
-
- Generate silence:
-
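-For example:
-
aevalsrc=0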
-
- Generate a sine signal with frequency of 440 Hz, set sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
-
-
-
-
7.3 anullsrc# TOC
-
-
The null audio source returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
7.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
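-For example (0x4 is the integer value of AV_CH_LAYOUT_MONO):
-
anullsrc=r=48000:cl=4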
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
-
-
7.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal. See also the list_voices option.
-
-
-
-
-
7.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite and
-the lavfi device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
7.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
7.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
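-Since 440 Hz is the default carrier frequency, this is simply:
-
sine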
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
8 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
8.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of the filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter for initialization.
-
-
8.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
9 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
9.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
9.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
-
9.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto.
-
-
-
-
-
9.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16.
-
-
-
-
-
9.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
9.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32.
-
-
-
-
-
-
9.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
The blend filter takes two input streams and outputs one
-stream; the first input is the "top" layer and the second input is the
-"bottom" layer. Output terminates when the shortest input terminates.
-
-
The tblend (time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode . Default value is normal.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
9.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
9.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2
for the
-luma and alpha planes, and of min(cw,ch)/2
for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
9.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
-
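boxblur=2:1:cr=0:ar=0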
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
9.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
9.9.1 Examples# TOC
-
-
- Visualize multi-directional MVs from P- and B-frames using ffplay:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
9.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
9.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
-
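colorbalance=rs=.3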
-
-
-
9.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), and change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
9.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
9.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated with
-the other channels of the same pixels. For example, if the value to
-modify is red, the output value will be:
-
-
red = red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
9.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
9.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
9.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
9.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1, force the output display aspect ratio
-to be the same as the input's, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
9.15.1 Examples# TOC
-
-
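A few illustrative invocations, based on the options and constants described
above (the values are only examples):

 Crop a 100x100 region whose top left corner is at position (12, 34):

crop=w=100:h=100:x=12:y=34

 Crop the central input area with a size of two thirds of the input video:

crop=2/3*in_w:2/3*in_h

 Crop 10 pixels from every border:

crop=in_w-20:in_h-20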
-
-
-
9.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set the higher black value threshold, which can optionally be specified
-from nothing (0) to everything (255 for 8-bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0, which will be scaled depending
-on the bit depth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
-
-
-
9.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-the vector space, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
To avoid some filtergraph syntax conflicts, each key point list needs to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ....
-
-
-
9.17.1 Examples# TOC
-
-
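Assuming the option names described above, adjustments such as the following
should be possible (the key point values are only illustrative):

 Apply the vintage preset:

curves=preset=vintage

 Slightly raise the middle level of the blue component:

curves=blue='0/0 0.5/0.58 1/1'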
-
-
-
9.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has a huge impact
-on processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
9.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5
:
-
-
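dctdnoiz=4.5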
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
Violent denoise using a block size of 16x16
:
-
-
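dctdnoiz=15:n=4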
-
-
9.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
-
-
-
9.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content, then the output of pullup,dejudder
-will have a variable frame rate. It may change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If the original was a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
-
-
-
9.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
9.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
9.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area in which to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
-
-
9.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
9.23.1 Examples# TOC
-
-
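A couple of illustrative invocations (positions, sizes and colors are only
examples):

 Draw a black box around the edge of the input image:

drawbox

 Draw a red box with 50% opacity at position (10, 20) with size 200x60:

drawbox=x=10:y=20:w=200:h=60:color=red@0.5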
-
-
-
9.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness
, so image gets
-framed. Default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
9.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
9.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
9.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer
-each other, so you can for example specify y=x/dar
.
-
-
-
-
-
9.25.2 Text expansion# TOC
-
-
If expansion is set to strftime
,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none
, the text is printed verbatim.
-
-
If expansion is set to normal
(which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequence of the form %{...}
are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms
stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
9.25.3 Examples# TOC
-
-
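For instance, assuming a FreeSerif font file is available at the path shown
(the path is only an example):

 Draw "Test Text" with the default values:

drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text'"

 Draw the text centered in the frame:

drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='hello': x=(w-text_w)/2: y=(h-text_h)/2"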
-
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
9.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high .
-
-Default value for low is 20/255
, and default value for high
-is 50/255
.
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
9.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
9.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select the r, g, b planes
-together with the y, u, v planes at the same time.
-
-
-
-
-
9.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
9.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
-
-
-
9.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in
.
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
9.29.1 Examples# TOC
-
-
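A few illustrative invocations, using the options described above (frame counts
are only examples):

 Fade in the first 30 frames of video:

fade=in:0:30

 Fade out the last 45 frames of a 200-frame video:

fade=out:155:45

 Fade in the first 25 frames, fading only the alpha plane:

fade=in:0:25:alpha=1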
-
-
-
9.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0
or
-top
) or the bottom field (if the value is 1
or
-bottom
).
-
-
-
-
-
9.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch
needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch
will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch
can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on
-which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch
and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parentheses at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0
could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1
.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 defaults to 0
.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0]
.
-
-Default value is 12.0
.
-
-
-combmatch
-When combmatch is not none , fieldmatch will take into
-account the combed scores of matches when deciding what match to use as the
-final match.
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1
(every pixel will be detected as combed) to 255
(no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12]
.
-
-Default value is 9
.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0
and maximum is blocky x blockx
(at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80
.
-
-
-
-
-
9.31.1 p/c/n/u/b meaning# TOC
-
-
-
9.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch is configured to run a matching from bottom
-(field =bottom ), this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
9.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, an
-’x’ is placed above and below each matched field.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
9.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
9.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
9.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
9.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
9.34.1 Examples# TOC
-
-
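For instance, to convert the input video to the yuv420p format (the pixel
format list is only an example):

format=pix_fmts=yuv420p

Or to constrain the output to one of several acceptable formats:

format=pix_fmts=yuv420p|yuv444p|yuv410p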
-
-
-
9.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
9.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
-
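fps=fps=25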
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
9.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
9.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select frame after every step
frames.
-Allowed values are positive integers higher than 0. Default value is 1
.
-
-
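For example, an invocation like the following should keep one frame out of
every five:

framestep=5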
-
-
-
9.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH.
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive, or as a color description as specified in the "Color"
-section in the ffmpeg-utils manual), a position (specified as X /Y , where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
9.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
9.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0
− PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames have often larger QP. Default is
-0
(not enabled).
-
-
-
-
-
-
9.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified, it will evaluate to an opaque value.
-If neither chrominance expression is specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
9.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
geq=p(W-X\,Y)
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
9.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
9.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and radius of 8:
-
gradfun=3.5:8
-
- Specify radius, omitting the strength (which will fall back to the default
-value):
-
gradfun=radius=8
-
-
-
-
-
-
9.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0 disables the filter after the last frame of the CLUT is reached.
-Default is 1.
-
-
-
-
haldclut
also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
9.42.1 Workflow examples# TOC
-
-
-
9.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut
to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (the duration of
-clut.nut ), then the last picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot stream.
-
-
-
9.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a square image of Level*Level*Level by
-Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut
filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
9.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
9.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none
, weak
or
-strong
. It defaults to none
.
-
-
-
-
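As an illustrative invocation (values chosen arbitrarily), a gentle equalization with weak antibanding
-might look like:
-
histeq=strength=0.1:antibanding=weak
-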
-
9.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar as color
but actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform
mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10
. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform
. Can be either row
, or column
.
-Default is row
.
-
-
-waveform_mirror
-Set mirroring mode for waveform
. 0
means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row
mode and at the top for column
mode. Default is
-0
(unmirrored).
-
-
-display_mode
-Set display mode for waveform
and levels
.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels
. Can be either linear
, or logarithmic
.
-Default is linear
.
-
-
-
-
-
9.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
9.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
-
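For illustration (an arbitrary, stronger-than-default setting), only the spatial luma strength needs to
-be given; the remaining parameters then follow from the defaults described above:
-
hqdn3d=luma_spatial=8
-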
-
9.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2
for hq2x
, 3
for
-hq3x
and 4
for hq4x
.
-Default is 3
.
-
-
-
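For example, to magnify pixel art by a factor of 4 (an illustrative invocation):
-
hqx=n=4
-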
-
-
9.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
9.48.1 Examples# TOC
-
-
-
-
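A couple of illustrative invocations (typical uses, not an exhaustive list):
-
- Set the hue to 90 degrees and the saturation to 1.0:
-
hue=h=90:s=1
-
- Rotate the hue and make the saturation swing between 0 and 2 over a period of 1 second:
-
hue="H=2*PI*t:s=sin(2*PI*t)+1"
-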
-
9.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
9.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect whether the input frames are interlaced, progressive,
-or top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-if the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
-
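As an illustrative way to inspect a file's interlacing (the input name is a placeholder), the filter can
-be run against a null muxer and the reported statistics read from the log output:
-
ffmpeg -i input.ts -an -vf idet -frames:v 200 -f null -
-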
-
-
-
9.50 il# TOC
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced images fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none
.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
-
-
-
-
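A minimal sketch of that workflow (filter choice and radius are arbitrary): deinterleave the fields, blur
-them independently, then re-interleave:
-
il=l=d:c=d:a=d,boxblur=2:1,il=l=i:c=i:a=i
-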
-
9.51 interlace# TOC
-
-
Simple interlacing filter from progressive contents. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
-
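For example, to produce top-field-first interlaced output from progressive input (file names are
-placeholders):
-
ffmpeg -i progressive.mp4 -vf interlace=scan=tff interlaced.mp4
-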
-
9.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
9.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
kerndeint=sharp=1
-
- Paint processed pixels in white:
-
kerndeint=map=1
-
-
-
-
9.53 lenscorrection# TOC
-
-
Correct radial lens distortion.
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
9.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
-
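A minimal sketch (the coefficients below are arbitrary placeholders that would normally come from
-calibration or trial-and-error as described above):
-
lenscorrection=cx=0.5:cy=0.5:k1=0.4:k2=0.45
-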
-
9.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
-
-
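For example, to apply a cube-format LUT with tetrahedral interpolation (the file name is a placeholder):
-
lut3d=file=grade.cube:interp=tetrahedral
-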
-
9.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
9.55.1 Examples# TOC
-
-
-
-
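Some illustrative invocations (assuming 8-bit input, so the chroma mid-point is 128):
-
- Negate the luminance:
-
lutyuv=y=negval
-
- Remove the chroma components, turning the video into gray:
-
lutyuv="u=128:v=128"
-
- Remove the green and blue components:
-
lutrgb="g=0:b=0"
-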
-
9.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merges selected input
-planes into the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0
.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mappings is
-similar, ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p
.
-
-
-
-
-
9.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
9.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
-
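A minimal sketch of the combination described above (mode, parity and qp values are illustrative only):
-
yadif=1,mcdeint=mode=medium:parity=tff:qp=2
-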
-
-
9.58 mp# TOC
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-of the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
9.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
mp=eq2=1.0:2:0.5
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
9.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previously dropped consecutive frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
-
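An illustrative command (file names are placeholders); variable frame rate output is requested so that
-the dropped frames are not duplicated back by the muxer:
-
ffmpeg -i input.mp4 -vf mpdecimate -vsync vfr output.mp4
-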
-
-
9.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer parameter; if non-zero, the alpha component
-(if available) is negated as well. The default value is 0.
-
-
-
9.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-pix_fmts=yuv420p|monow|rgb24.
-
-
-
-
-
-
9.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
9.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457
.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for specific pixel component or all pixel components in case
-all_strength . Default value is 0
. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags or set flags for all components if all_flags .
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
9.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
9.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
9.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
9.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate
.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file are used instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
9.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode
.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
9.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depends on the smooth type. param1 and
-param2 accept positive integer values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth
.
-
-
-
9.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order; hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them both start from the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together multiple overlays, but you should test the
-efficiency of such an approach.
-
-
-
9.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
9.65.2 Examples# TOC
-
-
-
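Some illustrative invocations (input names are placeholders):
-
- Draw the overlay 10 pixels away from the bottom-right corner of the main video:
-
overlay=main_w-overlay_w-10:main_h-overlay_h-10
-
- Insert a logo in the top-left corner of a video using ffmpeg :
-
ffmpeg -i main.mp4 -i logo.png -filter_complex "[0:v][1:v]overlay=10:10" out.mp4
-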
-
-
9.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8
.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0
.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0
.
-
-
-
-
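For illustration (arbitrary values), a deeper analysis with stronger luma and chroma denoising might be:
-
owdenoise=depth=10:ls=15:cs=15
-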
-
9.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
9.67.1 Examples# TOC
-
-
-
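A couple of illustrative invocations:
-
- Add 40 pixels of black padding on every side, keeping the input centered:
-
pad=iw+80:ih+80:40:40
-
- Pad the input to a 4:3 display aspect ratio, centering the original image:
-
pad="max(iw\,ih*(4/3)):ow/(4/3):(ow-iw)/2:(oh-ih)/2"
-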
-
-
9.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H
with which perspective will remain unchanged.
-If the sense
option is set to source
, then the specified points will be sent
-to the corners of the destination. If the sense
option is set to destination
,
-then the corners of the source will be sent to the specified coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
-
-
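A minimal sketch (offsets chosen arbitrarily) that maps points 30 pixels in from the top corners of the
-source to the output corners:
-
perspective=x0=30:y0=0:x1=W-30:y1=0:x2=0:y2=H:x3=W:y3=H
-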
-
9.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
9.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
-
9.71 pp# TOC
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl
).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255
.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1)
filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1)
filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1)
filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a
)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a
)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
-
-
-
-
-
9.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
pp=hb/vb/dr/al
-
- Apply default filters without brightness/contrast correction:
-
pp=de/-al
-
- Apply default filters and temporal denoiser:
-
pp=default/tmpnoise|1|2|3
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
pp=hb|y/vb|a
-
-
-
-
9.72 pp7# TOC
-
Apply Postprocessing filter 7. It is variant of the spp filter,
-similar to spp = 6 with 7 point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
-
-
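For illustration (values chosen arbitrarily), a forced quantizer with soft thresholding could be
-requested with:
-
pp7=qp=8:mode=soft
-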
-
9.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes two input videos; the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file written when stats_file is specified contains a sequence of
-key/value pairs of the form key :value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_g, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
9.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup: use fps=24000/1001 if the input frame rate is 29.97 fps, and
-fps=24 for 30 fps and for the (rare) telecined 25 fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-the filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make the filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0
.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use chroma plane instead of the default luma plane
-for doing filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
-
9.75 qp# TOC
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
9.75.1 Examples# TOC
-
-
- Some equation like:
-
qp=2+2*sin(PI*qp)
-
-
-
9.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
-
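For example (the mask file name is a placeholder):
-
removelogo=f=logo_mask.png
-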
-
9.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
9.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
rotate=PI/6
-
- Rotate the input by PI/6 radians counter-clockwise:
-
rotate=-PI/6
-
- Rotate the input by 45 degrees clockwise:
-
rotate=45*PI/180
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
rotate=PI/3+2*PI*t/T
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
9.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
9.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
-
9.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
9.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used.
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is different from specifying -1 for w
-or h ; you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar .
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example, for the
-pixel format "yuv422p" ohsub is 2 and ovsub is 1.
-
-
-
-
-
9.79.2 Examples# TOC
-
-
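- A few illustrative sketches of common invocations follow. Scale the input to
-1280x720:
-
scale=w=1280:h=720
-
- Keep the input aspect ratio by letting the filter derive the width from the
-height:
-
scale=-1:720
-
- Fit the video inside a 1280x720 box without distortion, decreasing one
-dimension if needed:
-
scale=1280:720:force_original_aspect_ratio=decrease
-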
-
-
-
9.80 separatefields# TOC
-
-
The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half-height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses field-dominance information in the frame to decide which
-of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields filter.
-
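-
As a rough usage sketch (the file names are only placeholders), a 25 fps
-interlaced input becomes a 50 fps stream of half-height field pictures with:
-
ffmpeg -i interlaced.ts -vf separatefields out.mp4
-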
-
-
9.81 setdar, setsar# TOC
-
-
The setdar filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar only), sar (setsar only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, it is assumed the value "0".
-In case the form "num :den " is used, the : character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
9.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-
-
- To change the sample aspect ratio to 10:11, specify:
-
setsar=sar=10/11
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
9.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield
filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder
or yadif
).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
9.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType
enum and of
-the av_get_picture_type_char
function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
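-
A minimal way to inspect this report (the file name is a placeholder) is to
-decode to the null muxer so that only the log output is produced:
-
ffmpeg -i input.mp4 -vf showinfo -f null -
-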
-
9.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
9.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specifies an additional form of image analysis.
-out outputs the video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outlier pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition . Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
9.85.1 Examples# TOC
-
-
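- As an illustrative sketch (the movie name is a placeholder), the per-frame
-statistics can be inspected with ffprobe through the lavfi device:
-
ffprobe -f lavfi movie=example.mov,signalstats -show_frames
-
- To preview the video while highlighting pixels outside of broadcast range:
-
ffplay -vf signalstats=out=brng input.mkv
-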
-
-
-
9.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
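-
As one possible sketch (the values are chosen purely for illustration, and the
-file names are placeholders), a mild edge-preserving blur could look like:
-
ffmpeg -i input.mp4 -vf smartblur=luma_radius=2.5:luma_strength=0.5 output.mp4
-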
-
-
9.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filter accepts the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
9.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue Dubois:
-
stereo3d=sbsl:aybd
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
stereo3d=abl:sbsr
-
-
-
-
9.88 spp# TOC
-
-
Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or, in the case of quality level 6, all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0, the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is 3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1. Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0 (not enabled).
-
-
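-
For example, a slightly stronger pass with soft thresholding might be sketched
-as follows (values and file names are just illustrations):
-
ffmpeg -i input.mp4 -vf spp=quality=5:mode=soft output.mp4
-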
-
-
-
9.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
-
subtitles=sub.srt
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
subtitles=video.mkv
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
9.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
9.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
9.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-The default value is top.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
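-
As a sketch of the classic 24p-to-NTSC case (file names and rates are assumed
-for illustration), the default 2:3 pulldown can be applied with:
-
ffmpeg -i film_24p.mp4 -vf telecine=first_field=top:pattern=23 out.mkv
-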
-
-
9.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100.
-
-
-
-
Since the filter keeps track of the whole frames sequence, a bigger n
-value will result in a higher memory usage, so a high value is not recommended.
-
-
-
9.93.1 Examples# TOC
-
-
- Extract one picture every 50 frames:
-
thumbnail=50
-
- Complete example of a thumbnail creation with ffmpeg:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
9.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w x h . The default value is 0, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
9.94.1 Examples# TOC
-
-
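- One commonly shown use is a keyframe contact sheet: decode only keyframes,
-shrink them, and pack them on an 8x8 grid (the exact sizes are illustrative):
-
ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
-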
-
-
-
9.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge
.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
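-
An illustrative example (file names are placeholders): turn a progressive
-50 fps source into 25 interlaced frames per second with vertical low-pass
-filtering enabled:
-
ffmpeg -i progressive_50p.mp4 -vf tinterlace=mode=interleave_top:flags=vlfp out.mkv
-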
-
-
-
9.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated, the passthrough option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
transpose=1:portrait
-
-
-
9.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
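-
A frame-accurate variant of the above (counting frames instead of timestamps)
-could be sketched as:
-
ffmpeg -i INPUT -vf trim=start_frame=1500:end_frame=3000 OUTPUT
-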
-
-
-
-
-
9.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
9.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
unsharp=7:7:-2:7:7:-2
-
-
-
9.99 uspp# TOC
-
-
Apply ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or, in the case of quality level 8, all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is 3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
-
-
9.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
9.100.1 Examples# TOC
-
-
- Use default values:
-
vidstabdetect
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
vidstabdetect=show=1
-
- Analyze a video with medium shakiness using ffmpeg:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
9.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
-
9.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smooth the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0. Default value is 0.
-
-Use also the tripod option of vidstabdetect.
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
9.101.2 Examples# TOC
-
-
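- The two passes are typically chained as sketched below; the unsharp settings
-are a common suggestion rather than a requirement, and the file names are
-placeholders:
-
ffmpeg -i inp.mpeg -vf vidstabdetect -f null -
ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
-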
-
-
-
9.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
9.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2] range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2"
and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1
.
-
-
-
-
-
9.103.1 Expressions# TOC
-
-
The angle , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
9.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
vignette=PI/4
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
9.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so-called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accepts one of the following values:
-
-
-‘all ’
-Deinterlace all frames,
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
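-
For instance (a sketch with placeholder names), deinterlace only the frames
-flagged as interlaced, using the more complex coefficient set:
-
ffmpeg -i input.ts -vf w3fdif=filter=complex:deint=interlaced output.mp4
-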
-
-
-
9.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for 3xBR and 4 for 4xBR.
-Default is 3.
-
-
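-
A short sketch (file names are assumptions) enlarging a piece of pixel art by
-4x:
-
ffmpeg -i sprites.png -vf xbr=4 sprites_4x.png
-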
-
-
-
9.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accepts one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all.
-
-
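-
An illustrative command (input/output names are placeholders): double the frame
-rate while deinterlacing only frames flagged as interlaced, auto-detecting the
-field parity:
-
ffmpeg -i input.ts -vf yadif=mode=send_field:parity=auto:deint=interlaced output.mp4
-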
-
-
-
9.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets the number of output frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of last output frame of previous input frame or 0 when there was
-not yet such frame (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-number of output frames created for previous input frame
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
9.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
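-
A complementary sketch (expressions chosen only for illustration): zoom in
-towards the center of each input image instead of a fixed spot:
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)'
-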
-
-
-
-
10 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
10.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
10.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename , and pattern options. If such options are
-not specified an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior; to disable it, set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior; to disable it, set the value to 0.
-
-
-
-
-
10.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
10.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
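-
A brief sketch (encoder defaults and the output name are assumptions) rendering
-ten seconds of the default zoom to a file:
-
ffmpeg -f lavfi -i mandelbrot=s=640x480:r=25 -t 10 mandelbrot.mp4
-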
-
-
10.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
mptestsrc=t=dc_luma
-
will generate a "dc_luma" test pattern.
-
-
-
10.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name of the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
10.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or be born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS /BNB ",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bit integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, the low order bits specify
-the rule for "birthing" new cells. Higher order bits encode a
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9
specifies a stay alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 alive neighbor
-cells, and will give birth to a new cell if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
10.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
-
life=rule=S14/B34
-
- Full example with slow death effect (mold) using ffplay
:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
10.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
The color
source provides a uniformly colored input.
-
-
The haldclutsrc
source provides an identity Hald CLUT. See also
-haldclut filter.
-
-
The nullsrc
source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
The rgbtestsrc
source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
The smptebars
source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
The smptehdbars
source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
The testsrc
source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N
generates a picture of N*N*N
by N*N*N
-pixels to be used as identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N)
scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc
can be used. The
-following command generates noise in the luminance plane by employing
-the geq
filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
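-
-The haldclutsrc source has no dedicated example above; as an illustrative
-sketch (the output file name clut.png is arbitrary), a single identity Hald
-CLUT image of level 8 can be written with:
-
-
ffmpeg -f lavfi -i haldclutsrc=level=8 -frames:v 1 clut.png
-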
-
-
10.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
-
-
11 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
11.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
11.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
12 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
12.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
-The filter is used to measure the difference between channels of a stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight (or nearly straight) horizontal line appears, this
-indicates that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
-Allowed range is [0, 255]
.
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
-Allowed range is [0, 255]
.
-
-
-zoom
-Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
-
-
-
-
-
12.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
12.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
12.2.1 Examples# TOC
-
-
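- As a sketch, join three segments, each with one video and one audio stream,
-into a single output (the file names are only illustrative):
-
-
ffmpeg -i part1.mkv -i part2.mkv -i part3.mkv -filter_complex \
  '[0:v][0:a][1:v][1:a][2:v][2:a] concat=n=3:v=1:a=1 [v][a]' \
  -map '[v]' -map '[a]' output.mkv
-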
-
-
-
12.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M
), Short-term loudness (S
),
-Integrated loudness (I
) and Loudness Range (LRA
).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is 0
.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9
. Common values are 9
and
-18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value between this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1
, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
.
-
-Default is 0
.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the highest sample value. It logs a message
-for sample-peak (identified by SPK
).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak
-(identified by TPK
) and true-peak per frame (identified by FTPK
).
-This mode requires a build with libswresample
.
-
-
-
-
-
-
-
-
12.3.1 Examples# TOC
-
-
- Real-time graph using ffplay
, with a EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg
:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
12.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave
works with video inputs, ainterleave
with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
-Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-
-In order to submit one frame to the output, these filters need to enqueue
-at least one frame for each input, so they cannot work if one
-input has not yet terminated but will not receive incoming frames.
-
-
-For example, consider the case when one input is a select
 filter
-which always drops input frames. The interleave
 filter will keep
-reading from that input, but it will never be able to send new frames
-to the output until that input sends an end-of-stream signal.
-
-
-Also, depending on input synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
12.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg
:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
12.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
-These filters are mainly aimed at developers, to test the direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode, must be an integer included between
-0
and UINT32_MAX
. If not specified, or if explicitly set to
--1
, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
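-
-As an illustrative sketch, forcing every frame read-only before a filter
-exercises that filter's non-in-place code path (INPUT and OUTPUT are
-placeholders):
-
-
ffmpeg -i INPUT -vf perms=mode=ro,hue=s=0 OUTPUT
-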
-
-
12.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1
, assuming that the input index starts from 0.
-
-For example a value of 1.2
corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1
, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
12.6.1 Examples# TOC
-
-
-
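- As a sketch, keep only the key frames (all other frames evaluate the
-expression to 0 and are discarded):
-
-
select='eq(pict_type\,I)'
-
- As another sketch, keep only frames whose scene-change score exceeds 0.4:
-
-
select='gt(scene\,0.4)'
-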
-
-
12.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd
must be inserted between two video filters,
-asendcmd
must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
12.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
-
START [-END ] COMMANDS ;
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater than or equal to START and is less than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter]
is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespaces, or
-sequences of characters starting with #
until the end of line,
-are ignored and can be used to add comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
12.7.2 Examples# TOC
-
-
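- As a sketch, desaturate the video from the 4 second mark onwards by sending
-the s command to a following hue filter:
-
-
sendcmd=c='4.0 hue s 0',hue
-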
-
-
-
12.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts
works on video frames, asetpts
on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
12.8.1 Examples# TOC
-
-
- Start counting PTS from zero:
-
-
setpts=PTS-STARTPTS
-
- Apply fast motion effect:
-
-
setpts=0.5*PTS
-
- Apply slow motion effect:
-
-
setpts=2.0*PTS
-
- Set fixed rate of 25 frames per second:
-
-
setpts=N/(25*TB)
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
- Apply an offset of 10 seconds to the input PTS:
-
-
setpts=PTS+10/TB
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
- Generate timestamps by counting samples:
-
-
asetpts=N/SR/TB
-
-
-
-
-
-
12.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
12.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
-
-
settb=expr=1/25
-
- Set the timebase to 1/10:
-
-
settb=expr=0.1
-
- Set the timebase to 1001/1000:
-
-
settb=1+0.001
-
- Set the timebase to 2*intb:
-
-
settb=2*intb
-
- Set the default timebase value:
-
-
settb=AVTB
-
-
-
-
12.10 showcqt# TOC
-
Convert input audio to a video output representing
-frequency spectrum logarithmically (using constant Q transform with
-Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16
.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc)
.
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies, there is a trade-off between
-accuracy in the time domain and the frequency domain. If timeclamp is lower,
-events in the time domain are represented more accurately (such as a fast bass drum),
-otherwise events in the frequency domain are represented more accurately
-(such as a bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, transform is
-more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
-Default value is 1.0
.
-
-
-gamma
-Specify gamma. A lower gamma makes the spectrum more contrasted, a higher gamma
-gives the spectrum more range. Acceptable value is [1.0, 7.0].
-Default value is 3.0
.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify font color expression. This is an arithmetic expression that should return
-an integer value in the form 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25
.
-
-
-count
-Specify the number of transforms per frame, so there are fps*count transforms
-per second. Note that audio data rate must be divisible by fps*count.
-Default value is 6
.
-
-
-
-
-
-
12.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
12.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512
.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace
.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set saturation modifier for displayed colors. Negative values provide
-alternative color scheme. 0
is no saturation at all.
-Saturation must be in [-10.0, 10.0] range.
-Default value is 1
.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann
.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
12.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
-
-
-
12.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point
.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
12.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
-
-
-
12.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit
works with audio input, split
with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
12.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg
:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
-
-
-
12.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq
and azmq
work as a pass-through filters. zmq
-must be inserted between two video filters, azmq
between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq
.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq
and azmq
filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
-
TARGET COMMAND [ARG ]
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
12.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
13 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
13.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
13.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod
, so the numerical value may be suffixed by an IS
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
13.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
-
-
-
-
-
14 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavfilter
-
-
-
-
15 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-formats.html b/Externals/ffmpeg/dev/doc/ffmpeg-formats.html
deleted file mode 100644
index 1350b5caba..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-formats.html
+++ /dev/null
@@ -1,2311 +0,0 @@
-
-
-
-
-
-
- FFmpeg Formats Documentation
-
-
-
-
-
-
-
-
- FFmpeg Formats Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the supported formats (muxers and demuxers)
-provided by the libavformat library.
-
-
-
-
2 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value will allow more information to be
-detected in case it is dispersed into the stream, but will increase
-latency. Must be an integer not less than 32. It is 5000000 by default.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value will allow more accurate information to be detected, but will
-increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non-compliances as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitle desynchronization and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set the number of bytes to skip before reading the header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
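-
-The generic options above are set like any other option in the FFmpeg tools.
-As a sketch (INPUT and OUTPUT are placeholders), raising the probing limits
-for an input whose stream parameters are hard to detect could look like:
-
-
ffmpeg -probesize 50000000 -analyzeduration 10000000 -i INPUT -c copy OUTPUT
-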
-
-
-
2.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
-
-
3 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
3.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
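-
-As a sketch (the URL is only illustrative), such a stream can be read and
-remuxed with:
-
-
ffmpeg -i http://example.com/stream/master.m3u8 -c copy output.ts
-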
-
3.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers, but the PNG signature, up to (but not including) the first
-fcTL chunk are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
-
-
3.3 asf# TOC
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
3.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
3.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was set to its default value of -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order-mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the stream with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
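-
-A sketch of a minimal script (file names are only illustrative), followed by
-how it can be read with the ffmpeg tool:
-
-
ffconcat version 1.0
file intro.mkv
file episode.mkv
-
-
ffmpeg -f concat -i list.ffconcat -c copy output.mkv
-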
-
-
3.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
-
-
3.5 flv# TOC
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
3.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-the tracks metadata entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
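-
-As a sketch (the file name is only illustrative), the third track of a
-multi-track file can be selected with:
-
-
ffmpeg -track_index 2 -i music.nsf out.wav
-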
-
-
3.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
3.8 gif# TOC
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
3.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0N d", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%d0N d" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0N d", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern does not need to contain "%d" or
-"%0Nd"; for example, to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not guaranteed: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
3.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern, that is, all the files
-ending with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
3.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
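-
For example, to remux a transport stream while leaving the teletext packet
-PTS and DTS values untouched (an illustrative command; input.ts and output.ts
-are placeholder filenames):
-
-
ffmpeg -fix_teletext_pts 0 -i input.ts -c copy output.ts
-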
-
-
-
3.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
3.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
3.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
4 Muxers# TOC
-
-
Muxers are configured elements in FFmpeg which allow writing
-multimedia streams to a particular type of file.
-
-
When you configure your FFmpeg build, all the supported muxers
-are enabled by default. You can list all available muxers using the
-configure option --list-muxers
.
-
-
You can disable all the muxers with the configure option
---disable-muxers
and selectively enable / disable single muxers
-with the options --enable-muxer=MUXER
/
---disable-muxer=MUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled muxers.
-
-
A description of some of the currently available muxers follows.
-
-
-
4.1 aiff# TOC
-
-
Audio Interchange File Format muxer.
-
-
-
4.1.1 Options# TOC
-
-
It accepts the following options:
-
-
-write_id3v2
-Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
-
-
-id3v2_version
-Select the ID3v2 version to write. Currently only versions 3 and 4 (i.e.
-ID3v2.3 and ID3v2.4) are supported. The default is version 4.
-
-
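-
For example, to write an AIFF file with ID3v2.3 tags (an illustrative command;
-input.wav and output.aiff are placeholder filenames):
-
-
ffmpeg -i input.wav -write_id3v2 1 -id3v2_version 3 output.aiff
-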
-
-
-
-
4.2 crc# TOC
-
CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a single line of the form:
-CRC=0xCRC , where CRC is a hexadecimal number 0-padded to
-8 digits containing the CRC for all the decoded input frames.
-
-
See also the framecrc muxer.
-
-
-
4.2.1 Examples# TOC
-
-
For example to compute the CRC of the input, and store it in the file
-out.crc :
-
-
ffmpeg -i INPUT -f crc out.crc
-
-
-
You can print the CRC to stdout with the command:
-
-
ffmpeg -i INPUT -f crc -
-
-
-
You can select the output format of each frame with ffmpeg
by
-specifying the audio and video codec and format. For example to
-compute the CRC of the input audio converted to PCM unsigned 8-bit
-and the input video converted to MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
-
-
-
-
4.3 framecrc# TOC
-
-
Per-packet CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , 0xCRC
-
-
-
CRC is a hexadecimal number 0-padded to 8 digits containing the
-CRC of the packet.
-
-
-
4.3.1 Examples# TOC
-
-
For example to compute the CRC of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.crc :
-
-
ffmpeg -i INPUT -f framecrc out.crc
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framecrc -
-
-
-
With ffmpeg
, you can select the output format to which the
-audio and video frames are encoded before computing the CRC for each
-packet by specifying the audio and video codec. For example, to
-compute the CRC of each decoded input audio frame converted to PCM
-unsigned 8-bit and of each decoded input video frame converted to
-MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
-
-
-
See also the crc muxer.
-
-
-
4.4 framemd5# TOC
-
-
Per-packet MD5 testing format.
-
-
This muxer computes and prints the MD5 hash for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , MD5
-
-
-
MD5 is a hexadecimal number representing the computed MD5 hash
-for the packet.
-
-
-
4.4.1 Examples# TOC
-
-
For example to compute the MD5 of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.md5 :
-
-
ffmpeg -i INPUT -f framemd5 out.md5
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framemd5 -
-
-
-
See also the md5 muxer.
-
-
-
4.5 gif# TOC
-
Animated GIF muxer.
-
-
It accepts the following options:
-
-
-loop
-Set the number of times to loop the output. Use -1
for no loop, 0
-for looping indefinitely (default).
-
-
-final_delay
-Force the delay (expressed in centiseconds) after the last frame. Each frame
-ends with a delay until the next frame. The default is -1
, which is a
-special value to tell the muxer to re-use the previous delay. In case of a
-loop, you might want to customize this value to mark a pause for instance.
-
-
-
-
For example, to encode a GIF looping 10 times, with a 5 second delay between
-loops:
-
-
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
-
-
-
Note 1: if you wish to extract the frames in separate GIF files, you need to
-force the image2 muxer:
-
-
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
-
-
-
Note 2: the GIF format has a very small time base: the delay between two frames
-cannot be smaller than one centisecond.
-
-
-
4.6 hls# TOC
-
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
-the HTTP Live Streaming (HLS) specification.
-
-
It creates a playlist file, and one or more segment files. The output filename
-specifies the playlist filename.
-
-
By default, the muxer creates a file for each segment produced. These files
-have the same name as the playlist, followed by a sequential number and a
-.ts extension.
-
-
For example, to convert an input file with ffmpeg
:
-
-
ffmpeg -i in.nut out.m3u8
-
-
This example will produce the playlist, out.m3u8 , and segment files:
-out0.ts , out1.ts , out2.ts , etc.
-
-
See also the segment muxer, which provides a more generic and
-flexible implementation of a segmenter, and can be used to perform HLS
-segmentation.
-
-
-
4.6.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-hls_time seconds
-Set the segment length in seconds. Default value is 2.
-
-
-hls_list_size size
-Set the maximum number of playlist entries. If set to 0 the list file
-will contain all the segments. Default value is 5.
-
-
-hls_ts_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing :
special characters must be
-escaped.
-
-
-hls_wrap wrap
-Set the number after which the segment filename number (the number
-specified in each segment file) wraps. If set to 0 the number will
-never be wrapped. Default value is 0.
-
-This option is useful to avoid filling the disk with many segment
-files, and limits the maximum number of segment files written to disk
-to wrap .
-
-
-start_number number
-Start the playlist sequence number from number . Default value is
-0.
-
-
-hls_allow_cache allowcache
-Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
-
-
-hls_base_url baseurl
-Append baseurl to every entry in the playlist.
-Useful to generate playlists with absolute paths.
-
-Note that the playlist sequence number must be unique for each segment
-and it is not to be confused with the segment filename sequence number
-which can be cyclic, for example if the wrap option is
-specified.
-
-
-hls_segment_filename filename
-Set the segment filename. Unless hls_flags single_file is set, filename
-is used as a format string with the segment number:
-
-
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
-
-This example will produce the playlist, out.m3u8 , and segment files:
-file000.ts , file001.ts , file002.ts , etc.
-
-
-hls_flags single_file
-If this flag is set, the muxer will store all segments in a single MPEG-TS
-file, and will use byte ranges in the playlist. HLS playlists generated
-this way will have the version number 4.
-For example:
-
-
ffmpeg -i in.nut -hls_flags single_file out.m3u8
-
-Will produce the playlist, out.m3u8 , and a single segment file,
-out.ts .
-
-
-hls_flags delete_segments
-Segment files removed from the playlist are deleted after a period of time
-equal to the duration of the segment plus the duration of the playlist.
-
-
-
-
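-
For example, to split an existing transport stream into 4-second segments with
-a rolling playlist of 6 entries (an illustrative command; input.ts and the
-output names are placeholders):
-
-
ffmpeg -i input.ts -c copy -hls_time 4 -hls_list_size 6 -hls_segment_filename 'seg%03d.ts' out.m3u8
-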
-
4.7 ico# TOC
-
ICO file muxer.
-
-
Microsoft’s icon file format (ICO) has some strict limitations that should be noted:
-
-
- Size cannot exceed 256 pixels in any dimension
-
- Only BMP and PNG images can be stored
-
- If a BMP image is used, it must be one of the following pixel formats:
-
-
BMP Bit Depth FFmpeg Pixel Format
-1bit pal8
-4bit pal8
-8bit pal8
-16bit rgb555le
-24bit bgr24
-32bit bgra
-
-
- If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
-
- If a PNG image is used, it must use the rgba pixel format
-
-
-
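-
As an illustrative sketch, a PNG-based icon can be produced as follows; the
-scale and format filters are used so that the size and rgba pixel format
-constraints listed above are met (logo.png and favicon.ico are placeholder
-filenames):
-
-
ffmpeg -i logo.png -vf scale=64:64,format=rgba -c:v png favicon.ico
-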
-
4.8 image2# TOC
-
-
Image file muxer.
-
-
The image file muxer writes video frames to image files.
-
-
The output filenames are specified by a pattern, which can be used to
-produce sequentially numbered series of files.
-The pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a numbering in
-the filenames. If the form "%0Nd" is used, the string
-representing the number in each filename is 0-padded to N
-digits. The literal character ’%’ can be specified in the pattern with
-the string "%%".
-
-
-If the pattern contains "%d" or "%0Nd", the first filename of
-the file list specified will contain the number 1, all the following
-numbers will be sequential.
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the image files to write.
-
-
For example the pattern "img-%03d.bmp" will specify a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.
-The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
-form img%-1.jpg , img%-2.jpg , ..., img%-10.jpg ,
-etc.
-
-
-
4.8.1 Examples# TOC
-
-
The following example shows how to use ffmpeg
for creating a
-sequence of files img-001.jpeg , img-002.jpeg , ...,
-taking one image every second from the input video:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
-
-
-
Note that with ffmpeg
, if the format is not specified with the
--f
option and the output filename specifies an image file
-format, the image2 muxer is automatically selected, so the previous
-command can be written as:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
-
-
-
Note also that the pattern does not need to contain "%d" or
-"%0Nd"; for example, to create a single image file
-img.jpeg from the input video you can employ the command:
-
-
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
-
-
-
The strftime option allows you to expand the filename with
-date and time information. Check the documentation of
-the strftime()
function for the syntax.
-
-
For example to generate image files from the strftime()
-"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg
command
-can be used:
-
-
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
-
-
-
-
4.8.2 Options# TOC
-
-
-start_number
-Start the sequence from the specified number. Default value is 1. Must
-be a non-negative number.
-
-
-update
-If set to 1, the filename will always be interpreted as just a
-filename, not a pattern, and the corresponding file will be continuously
-overwritten with new images. Default value is 0.
-
-
-strftime
-If set to 1, expand the filename with date and time information from
-strftime()
. Default value is 0.
-
-
-
-
The image muxer supports the .Y.U.V image file format. This format is
-special in that each image frame consists of three files, one for
-each of the YUV420P components. To read or write this image file format,
-specify the name of the ’.Y’ file. The muxer will automatically open the
-’.U’ and ’.V’ files as required.
-
-
-
4.9 matroska# TOC
-
-
Matroska container muxer.
-
-
This muxer implements the matroska and webm container specs.
-
-
-
4.9.1 Metadata# TOC
-
-
The recognized metadata settings in this muxer are:
-
-
-title
-Set title name provided to a single track.
-
-
-language
-Specify the language of the track in the Matroska languages form.
-
-The language can be either the 3 letters bibliographic ISO-639-2 (ISO
-639-2/B) form (like "fre" for French), or a language code mixed with a
-country code for specialities in languages (like "fre-ca" for Canadian
-French).
-
-
-stereo_mode
-Set stereo 3D video layout of two views in a single video track.
-
-The following values are recognized:
-
-‘mono ’
-video is not stereo
-
-‘left_right ’
-Both views are arranged side by side, Left-eye view is on the left
-
-‘bottom_top ’
-Both views are arranged in top-bottom orientation, Left-eye view is at bottom
-
-‘top_bottom ’
-Both views are arranged in top-bottom orientation, Left-eye view is on top
-
-‘checkerboard_rl ’
-Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first
-
-‘checkerboard_lr ’
-Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first
-
-‘row_interleaved_rl ’
-Each view is constituted by a row based interleaving, Right-eye view is first row
-
-‘row_interleaved_lr ’
-Each view is constituted by a row based interleaving, Left-eye view is first row
-
-‘col_interleaved_rl ’
-Both views are arranged in a column based interleaving manner, Right-eye view is first column
-
-‘col_interleaved_lr ’
-Both views are arranged in a column based interleaving manner, Left-eye view is first column
-
-‘anaglyph_cyan_red ’
-All frames are in anaglyph format viewable through red-cyan filters
-
-‘right_left ’
-Both views are arranged side by side, Right-eye view is on the left
-
-‘anaglyph_green_magenta ’
-All frames are in anaglyph format viewable through green-magenta filters
-
-‘block_lr ’
-Both eyes laced in one Block, Left-eye view is first
-
-‘block_rl ’
-Both eyes laced in one Block, Right-eye view is first
-
-
-
-
-
-
For example a 3D WebM clip can be created using the following command line:
-
-
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
-
-
-
-
4.9.2 Options# TOC
-
-
This muxer supports the following options:
-
-
-reserve_index_space
-By default, this muxer writes the index for seeking (called cues in Matroska
-terms) at the end of the file, because it cannot know in advance how much space
-to leave for the index at the beginning of the file. However for some use cases
-– e.g. streaming where seeking is possible but slow – it is useful to put the
-index at the beginning of the file.
-
-If this option is set to a non-zero value, the muxer will reserve a given amount
-of space in the file header and then try to write the cues there when the muxing
-finishes. If the available space does not suffice, muxing will fail. A safe size
-for most use cases should be about 50kB per hour of video.
-
-Note that cues are only written if the output is seekable and this option will
-have no effect if it is not.
-
-
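-
For example, to remux a file while reserving roughly enough header space for
-the cues of a two-hour recording, following the 50kB-per-hour guideline above
-(an illustrative command; the value is in bytes and the filenames are
-placeholders):
-
-
ffmpeg -i input.mp4 -c copy -reserve_index_space 100000 output.mkv
-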
-
-
-
4.10 md5# TOC
-
-
MD5 testing format.
-
-
This muxer computes and prints the MD5 hash of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a single line of the form:
-MD5=MD5 , where MD5 is a hexadecimal number representing
-the computed MD5 hash.
-
-
For example to compute the MD5 hash of the input converted to raw
-audio and video, and store it in the file out.md5 :
-
-
ffmpeg -i INPUT -f md5 out.md5
-
-
-
You can print the MD5 to stdout with the command:
-
-
ffmpeg -i INPUT -f md5 -
-
-
-
See also the framemd5 muxer.
-
-
-
4.11 mov, mp4, ismv# TOC
-
-
MOV/MP4/ISMV (Smooth Streaming) muxer.
-
-
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
-file has all the metadata about all packets stored in one location
-(written at the end of the file, it can be moved to the start for
-better playback by adding faststart to the movflags , or
-using the qt-faststart
tool). A fragmented
-file consists of a number of fragments, where packets and metadata
-about these packets are stored together. Writing a fragmented
-file has the advantage that the file is decodable even if the
-writing is interrupted (while a normal MOV/MP4 is undecodable if
-it is not properly finished), and it requires less memory when writing
-very long files (since writing normal MOV/MP4 files stores info about
-every single packet in memory until the file is closed). The downside
-is that it is less compatible with other applications.
-
-
-
4.11.1 Options# TOC
-
-
Fragmentation is enabled by setting one of the AVOptions that define
-how to cut the file into fragments:
-
-
--moov_size bytes
-Reserves space for the moov atom at the beginning of the file instead of placing the
-moov atom at the end. If the space reserved is insufficient, muxing will fail.
-
--movflags frag_keyframe
-Start a new fragment at each video keyframe.
-
--frag_duration duration
-Create fragments that are duration microseconds long.
-
--frag_size size
-Create fragments that contain up to size bytes of payload data.
-
--movflags frag_custom
-Allow the caller to manually choose when to cut fragments, by
-calling av_write_frame(ctx, NULL)
to write a fragment with
-the packets written so far. (This is only useful with other
-applications integrating libavformat, not from ffmpeg
.)
-
--min_frag_duration duration
-Don’t create fragments that are shorter than duration microseconds long.
-
-
-
-
If more than one condition is specified, fragments are cut when
-one of the specified conditions is fulfilled. The exception to this is
--min_frag_duration
, which has to be fulfilled for any of the other
-conditions to apply.
-
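-
For example, to remux an existing file into a fragmented MP4, starting a new
-fragment at each video keyframe (an illustrative command; input.mp4 and
-fragmented.mp4 are placeholder filenames):
-
-
ffmpeg -i input.mp4 -c copy -movflags frag_keyframe fragmented.mp4
-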
-
Additionally, the way the output file is written can be adjusted
-through a few other options:
-
-
--movflags empty_moov
-Write an initial moov atom directly at the start of the file, without
-describing any samples in it. Generally, an mdat/moov pair is written
-at the start of the file, as a normal MOV/MP4 file, containing only
-a short portion of the file. With this option set, there is no initial
-mdat atom, and the moov atom only describes the tracks but has
-a zero duration.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags separate_moof
-Write a separate moof (movie fragment) atom for each track. Normally,
-packets for all tracks are written in a moof atom (which is slightly
-more efficient), but with this option set, the muxer writes one moof/mdat
-pair for each track, making it easier to separate tracks.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags faststart
-Run a second pass moving the index (moov atom) to the beginning of the file.
-This operation can take a while, and will not work in various situations such
-as fragmented output, thus it is not enabled by default.
-
--movflags rtphint
-Add RTP hinting tracks to the output file.
-
--movflags disable_chpl
-Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
-and a QuickTime chapter track are written to the file. With this option
-set, only the QuickTime chapter track will be written. Nero chapters can
-cause failures when the file is reprocessed with certain tagging programs, like
-mp3Tag 2.61a and iTunes 11.3; most likely other versions are affected as well.
-
--movflags omit_tfhd_offset
-Do not write any absolute base_data_offset in tfhd atoms. This avoids
-tying fragments to absolute byte positions in the file/streams.
-
--movflags default_base_moof
-Similarly to the omit_tfhd_offset, this flag avoids writing the
-absolute base_data_offset field in tfhd atoms, but does so by using
-the new default-base-is-moof flag instead. This flag is new from
-14496-12:2012. This may make the fragments easier to parse in certain
-circumstances (avoiding basing track fragment location calculations
-on the implicit end of the previous track fragment).
-
-
-
-
-
4.11.2 Example# TOC
-
-
Smooth Streaming content can be pushed in real time to a publishing
-point on IIS with this muxer. Example:
-
-
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
-
-
-
-
4.12 mp3# TOC
-
-
The MP3 muxer writes a raw MP3 stream with the following optional features:
-
- An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
-2.4 are supported, the id3v2_version
private option controls which one is
-used (3 or 4). Setting id3v2_version
to 0 disables the ID3v2 header
-completely.
-
-The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
-The pictures are supplied to the muxer in form of a video stream with a single
-packet. There can be any number of those streams, each will correspond to a
-single APIC frame. The stream metadata tags title and comment map
-to APIC description and picture type respectively. See
-http://id3.org/id3v2.4.0-frames for allowed picture types.
-
-Note that the APIC frames must be written at the beginning, so the muxer will
-buffer the audio frames until it gets all the pictures. It is therefore advised
-to provide the pictures as soon as possible to avoid excessive buffering.
-
- A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
-default, but will be written only if the output is seekable. The
-write_xing
private option can be used to disable it. The frame contains
-various information that may be useful to the decoder, like the audio duration
-or encoder delay.
-
- A legacy ID3v1 tag at the end of the file (disabled by default). It may be
-enabled with the write_id3v1
private option, but as its capabilities are
-very limited, its usage is not recommended.
-
-
-
Examples:
-
-
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
-
-
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
-
-
-
To attach a picture to an mp3 file select both the audio and the picture stream
-with map
:
-
-
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
--metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
-
-
-
Write a "clean" MP3 without any extra features:
-
-
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
-
-
-
-
4.13 mpegts# TOC
-
-
MPEG transport stream muxer.
-
-
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
-
-
The recognized metadata settings in mpegts muxer are service_provider
-and service_name
. If they are not set the default for
-service_provider
is "FFmpeg" and the default for
-service_name
is "Service01".
-
-
-
4.13.1 Options# TOC
-
-
The muxer options are:
-
-
--mpegts_original_network_id number
-Set the original_network_id (default 0x0001). This is unique identifier
-of a network in DVB. Its main use is in the unique identification of a
-service through the path Original_Network_ID, Transport_Stream_ID.
-
--mpegts_transport_stream_id number
-Set the transport_stream_id (default 0x0001). This identifies a
-transponder in DVB.
-
--mpegts_service_id number
-Set the service_id (default 0x0001) also known as program in DVB.
-
--mpegts_pmt_start_pid number
-Set the first PID for PMT (default 0x1000, max 0x1f00).
-
--mpegts_start_pid number
-Set the first PID for data packets (default 0x0100, max 0x0f00).
-
--mpegts_m2ts_mode number
-Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
-
--muxrate number
-Set a constant muxrate (default VBR).
-
--pcr_period number
-Override the default PCR retransmission time (default 20ms), ignored
-if variable muxrate is selected.
-
--pes_payload_size number
-Set minimum PES packet payload in bytes.
-
--mpegts_flags flags
-Set flags (see below).
-
--mpegts_copyts number
-Preserve original timestamps, if value is set to 1. Default value is -1, which
-results in shifting timestamps so that they start from 0.
-
--tables_version number
-Set the PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusive).
-This option allows updating the stream structure so that a standard consumer may
-detect the change. To do so, reopen the output AVFormatContext (in case of API
-usage) or restart the ffmpeg instance, cyclically changing the tables_version value:
-
-
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
-ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-
-
-
-
-
Option mpegts_flags may take a set of such flags:
-
-
-resend_headers
-Reemit PAT/PMT before writing the next packet.
-
-latm
-Use LATM packetization for AAC.
-
-
-
-
-
4.13.2 Example# TOC
-
-
-
ffmpeg -i file.mpg -c copy \
- -mpegts_original_network_id 0x1122 \
- -mpegts_transport_stream_id 0x3344 \
- -mpegts_service_id 0x5566 \
- -mpegts_pmt_start_pid 0x1500 \
- -mpegts_start_pid 0x150 \
- -metadata service_provider="Some provider" \
- -metadata service_name="Some Channel" \
- -y out.ts
-
-
-
-
4.14 null# TOC
-
-
Null muxer.
-
-
This muxer does not generate any output file; it is mainly useful for
-testing or benchmarking purposes.
-
-
For example to benchmark decoding with ffmpeg
you can use the
-command:
-
-
ffmpeg -benchmark -i INPUT -f null out.null
-
-
-
Note that the above command does not read or write the out.null
-file, but specifying the output file is required by the ffmpeg
-syntax.
-
-
Alternatively you can write the command as:
-
-
ffmpeg -benchmark -i INPUT -f null -
-
-
-
-
4.15 nut# TOC
-
-
--syncpoints flags
-Change the syncpoint usage in nut:
-
-default use the normal low-overhead seeking aids.
-none do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
-Use of this option is not recommended, as the resulting files are very damage
- sensitive and seeking is not possible. Also in general the overhead from
- syncpoints is negligible. Note, -write_index 0 can be used to disable
- all growing data tables, allowing endless streams to be muxed with limited
- memory and without these disadvantages.
-
-timestamped extend the syncpoint with a wallclock field.
-
-The none and timestamped flags are experimental.
-
--write_index bool
-Write index at the end, the default is to write an index.
-
-
-
-
-
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
-
-
-
-
4.16 ogg# TOC
-
-
Ogg container muxer.
-
-
--page_duration duration
-Preferred page duration, in microseconds. The muxer will attempt to create
-pages that are approximately duration microseconds long. This allows the
-user to compromise between seek granularity and container overhead. The default
-is 1 second. A value of 0 will fill all segments, making pages as large as
-possible. A value of 1 will effectively use 1 packet-per-page in most
-situations, giving a small seek granularity at the cost of additional container
-overhead.
-
-
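-
For example, to encode to Ogg with smaller pages for finer seek granularity
-(an illustrative command; input.flac and output.ogg are placeholder filenames,
-and the libvorbis encoder is assumed to be available):
-
-
ffmpeg -i input.flac -c:a libvorbis -page_duration 100000 output.ogg
-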
-
-
-
4.17 segment, stream_segment, ssegment# TOC
-
-
Basic stream segmenter.
-
-
This muxer outputs streams to a number of separate files of nearly
-fixed duration. Output filename pattern can be set in a fashion similar to
-image2 .
-
-
stream_segment
is a variant of the muxer used to write to
-streaming output formats, i.e. which do not require global headers,
-and is recommended for outputting e.g. to MPEG transport stream segments.
-ssegment
is a shorter alias for stream_segment
.
-
-
Every segment starts with a keyframe of the selected reference stream,
-which is set through the reference_stream option.
-
-
Note that if you want accurate splitting for a video file, you need to
-make the input key frames correspond to the exact splitting times
-expected by the segmenter, or the segment muxer will start the new
-segment with the key frame found next after the specified start
-time.
-
-
The segment muxer works best with a single constant frame rate video.
-
-
Optionally it can generate a list of the created segments, by setting
-the option segment_list . The list type is specified by the
-segment_list_type option. The entry filenames in the segment
-list are set by default to the basename of the corresponding segment
-files.
-
-
See also the hls muxer, which provides a more specific
-implementation for HLS segmentation.
-
-
-
4.17.1 Options# TOC
-
-
The segment muxer supports the following options:
-
-
-reference_stream specifier
-Set the reference stream, as specified by the string specifier .
-If specifier is set to auto
, the reference is chosen
-automatically. Otherwise it must be a stream specifier (see the “Stream
-specifiers” chapter in the ffmpeg manual) which specifies the
-reference stream. The default value is auto
.
-
-
-segment_format format
-Override the inner container format, by default it is guessed by the filename
-extension.
-
-
-segment_format_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing the :
special character must be
-escaped.
-
-
-segment_list name
-Generate also a listfile named name . If not specified no
-listfile is generated.
-
-
-segment_list_flags flags
-Set flags affecting the segment list generation.
-
-It currently supports the following flags:
-
-‘cache ’
-Allow caching (only affects M3U8 list files).
-
-
-‘live ’
-Allow live-friendly file generation.
-
-
-
-
-segment_list_type type
-Select the listing format.
-
-flat use a simple flat list of entries.
-hls use a m3u8-like structure.
-
-
-
-segment_list_size size
-Update the list file so that it contains at most size
-segments. If 0 the list file will contain all the segments. Default
-value is 0.
-
-
-segment_list_entry_prefix prefix
-Prepend prefix to each entry. Useful to generate absolute paths.
-By default no prefix is applied.
-
-The following values are recognized:
-
-‘flat ’
-Generate a flat list for the created segments, one segment per line.
-
-
-‘csv, ext ’
-Generate a list for the created segments, one segment per line,
-each line matching the format (comma-separated values):
-
-
segment_filename ,segment_start_time ,segment_end_time
-
-
-segment_filename is the name of the output file generated by the
-muxer according to the provided pattern. CSV escaping (according to
-RFC4180) is applied if required.
-
-segment_start_time and segment_end_time specify
-the segment start and end time expressed in seconds.
-
-A list file with the suffix ".csv"
or ".ext"
will
-auto-select this format.
-
-‘ext ’ is deprecated in favor of ‘csv ’.
-
-
-‘ffconcat ’
-Generate an ffconcat file for the created segments. The resulting file
-can be read using the FFmpeg concat demuxer.
-
-A list file with the suffix ".ffcat"
or ".ffconcat"
will
-auto-select this format.
-
-
-‘m3u8 ’
-Generate an extended M3U8 file, version 3, compliant with
-http://tools.ietf.org/id/draft-pantos-http-live-streaming .
-
-A list file with the suffix ".m3u8"
will auto-select this format.
-
-
-
-If not specified the type is guessed from the list file name suffix.
-
-
-segment_time time
-Set segment duration to time , the value must be a duration
-specification. Default value is "2". See also the
-segment_times option.
-
-Note that splitting may not be accurate, unless you force the
-reference stream key-frames at the given time. See the introductory
-notice and the examples below.
-
-
-segment_atclocktime 1|0
-If set to "1" split at regular clock time intervals starting from 00:00
-o’clock. The time value specified in segment_time is
-used for setting the length of the splitting interval.
-
-For example with segment_time set to "900" this makes it possible
-to create files at 12:00 o’clock, 12:15, 12:30, etc.
-
-Default value is "0".
-
-
-segment_time_delta delta
-Specify the accuracy time when selecting the start time for a
-segment, expressed as a duration specification. Default value is "0".
-
-When delta is specified a key-frame will start a new segment if its
-PTS satisfies the relation:
-
-
PTS >= start_time - time_delta
-
-
-This option is useful when splitting video content, which is always
-split at GOP boundaries, in case a key frame is found just before the
-specified split time.
-
-In particular, it may be used in combination with the ffmpeg option
-force_key_frames . The key frame times specified by
-force_key_frames may not be set accurately because of rounding
-issues, with the consequence that a key frame time may end up set just
-before the specified time. For constant frame rate videos a value of
-1/(2*frame_rate ) should address the worst case mismatch between
-the specified time and the time set by force_key_frames .
-
-
-segment_times times
-Specify a list of split points. times contains a list of comma
-separated duration specifications, in increasing order. See also
-the segment_time option.
-
-
-segment_frames frames
-Specify a list of split video frame numbers. frames contains a
-list of comma separated integer numbers, in increasing order.
-
-This option specifies to start a new segment whenever a reference
-stream key frame is found and the sequential number (starting from 0)
-of the frame is greater or equal to the next value in the list.
-
-
-segment_wrap limit
-Wrap around segment index once it reaches limit .
-
-
-segment_start_number number
-Set the sequence number of the first segment. Defaults to 0
.
-
-
-reset_timestamps 1|0
-Reset timestamps at the beginning of each segment, so that each segment
-will start with near-zero timestamps. It is meant to ease the playback
-of the generated segments. May not work with some combinations of
-muxers/codecs. It is set to 0
by default.
-
-
-initial_offset offset
-Specify timestamp offset to apply to the output packet timestamps. The
-argument must be a time duration specification, and defaults to 0.
-
-
-
-
-
4.17.2 Examples# TOC
-
-
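-
For example, to remux an input into roughly 10-second MPEG-TS segments and
-generate an M3U8 list of the created files (an illustrative command built from
-the options described above; the filenames are placeholders):
-
-
ffmpeg -i input.mkv -codec copy -map 0 -f segment -segment_time 10 -segment_list out.m3u8 -segment_list_type m3u8 'out%03d.ts'
-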
-
-
-
4.18 smoothstreaming# TOC
-
-
The Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with a conventional web server.
-
-
-window_size
-Specify the number of fragments kept in the manifest. Default 0 (keep all).
-
-
-extra_window_size
-Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
-
-
-lookahead_count
-Specify the number of lookahead fragments. Default 2.
-
-
-min_frag_duration
-Specify the minimum fragment duration (in microseconds). Default 5000000.
-
-
-remove_at_exit
-Specify whether to remove all fragments when finished. Default 0 (do not remove).
-
-
-
-
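-
As an illustrative sketch, assuming the output argument names a directory into
-which the Manifest and chunk files are written (the codecs, options and paths
-below are placeholders):
-
-
# write Smooth Streaming output into the /var/www/stream directory
-ffmpeg -re -i input.mp4 -c:v libx264 -c:a aac -strict experimental -f smoothstreaming -window_size 10 /var/www/stream
-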
-
-
4.19 tee# TOC
-
-
The tee muxer can be used to write the same data to several files or any
-other kind of muxer. It can be used, for example, to both stream a video to
-the network and save it to disk at the same time.
-
-
It is different from specifying several outputs to the ffmpeg
-command-line tool because the audio and video data will be encoded only once
-with the tee muxer; encoding can be a very expensive process. It is not
-useful when using the libavformat API directly because it is then possible
-to feed the same packets to several muxers directly.
-
-
The slave outputs are specified in the file name given to the muxer,
-separated by ’|’. If any of the slave names contains the ’|’ separator,
-leading or trailing spaces, or any special character, it must be
-escaped (see (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual ).
-
-
Muxer options can be specified for each slave by prepending them as a list of
-key =value pairs separated by ’:’, between square brackets. If
-the options values contain a special character or the ’:’ separator, they
-must be escaped; note that this is a second level escaping.
-
-
The following special options are also recognized:
-
-f
-Specify the format name. Useful if it cannot be guessed from the
-output name suffix.
-
-
-bsfs[/spec ]
-Specify a list of bitstream filters to apply to the specified
-output.
-
-It is possible to specify to which streams a given bitstream filter
-applies, by appending a stream specifier to the option separated by
-/
. spec must be a stream specifier (see Format stream specifiers ). If the stream specifier is not specified, the
-bitstream filters will be applied to all streams in the output.
-
-Several bitstream filters can be specified, separated by ",".
-
-
-select
-Select the streams that should be mapped to the slave output,
-specified by a stream specifier. If not specified, this defaults to
-all the input streams.
-
-
-
-
-
4.19.1 Examples# TOC
-
-
- Encode something and both archive it in a WebM file and stream it
-as MPEG-TS over UDP (the streams need to be explicitly mapped):
-
-
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
- "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
-
-
- Use ffmpeg
to encode the input, and send the output
-to three different destinations. The dump_extra
bitstream
-filter is used to add extradata information to all the output video
-keyframes packets, as requested by the MPEG-TS format. The select
-option is applied to out.aac in order to make it contain only
-audio packets.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
-
-
- As above, but select only stream a:1
for the audio output. Note
-that a second level escaping must be performed, as ":" is a special
-character used to separate options.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
-
-
-
-
Note: some codecs may need different options depending on the output format;
-the auto-detection of this cannot work with the tee muxer. The main example
-is the global_header flag.
-
-
-
4.20 webm_dash_manifest# TOC
-
-
WebM DASH Manifest muxer.
-
-
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
-
-
-
4.20.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-adaptation_sets
-This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
-unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
-audio and video streams. Any number of adaptation sets can be added using this option.
-
-
-
-
-
4.20.2 Example# TOC
-
-
ffmpeg -f webm_dash_manifest -i video1.webm \
- -f webm_dash_manifest -i video2.webm \
- -f webm_dash_manifest -i audio1.webm \
- -f webm_dash_manifest -i audio2.webm \
- -map 0 -map 1 -map 2 -map 3 \
- -c copy \
- -f webm_dash_manifest \
- -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
- manifest.xml
-
-
-
-
5 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
An ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
-
6 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavformat
-
-
-
-
7 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-protocols.html b/Externals/ffmpeg/dev/doc/ffmpeg-protocols.html
deleted file mode 100644
index 0fd895cce0..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-protocols.html
+++ /dev/null
@@ -1,1545 +0,0 @@
-
-
-
-
-
-
- FFmpeg Protocols Documentation
-
-
-
-
-
-
-
-
- FFmpeg Protocols Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the input and output protocols provided by the
-libavformat library.
-
-
-
-
2 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "--list-protocols".
-
-
You can disable all the protocols using the configure option
-"--disable-protocols", and selectively enable a protocol using the
-option "--enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"--disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
2.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
2.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to a temporary file. It brings seeking capability to live streams.
-
-
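-
For example, assuming the cache:URL form wraps another protocol (the HTTP
-address below is a placeholder):
-
-
# add local caching, and thus seeking, on top of an HTTP input
-ffplay cache:http://example.com/video.mp4
-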
-
-
-
2.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
Read and seek from many resources in sequence as if they were
-a unique resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
where URL1 , URL2 , ..., URLN are the URLs of the
-resources to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
2.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
-
-
2.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
2.6 file# TOC
-
-
File access protocol.
-
-
Read from or write to a file.
-
-
A file URL can have the form:
-
file:filename
-
where filename is the path of the file to read.
-
-
A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow medium.
-
-
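-
As an illustrative sketch, assuming protocol options such as blocksize can be
-passed as per-file options on the ffmpeg command line (the filenames are
-placeholders):
-
-
# read the input through the file protocol with a 64 KiB block size
-ffmpeg -blocksize 65536 -i file:input.mpeg output.mpeg
-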
-
-
2.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
Read from or write to remote resources using the FTP protocol.
-
-
The following syntax is required.
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when login as anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
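-
For example, to read a remote file over FTP (an illustrative command; the
-host, credentials and paths are placeholders):
-
-
# copy a remote FTP resource into a local file
-ffmpeg -i ftp://user:password@ftp.example.com/path/to/input.avi -c copy output.mkv
-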
-
-
2.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
2.9 hls# TOC
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
2.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
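-
For example, to save an HTTP resource while overriding the User-Agent header
-(an illustrative command; the URL and the header value are placeholders):
-
-
# fetch over HTTP with a custom User-Agent and remux without reencoding
-ffmpeg -user_agent "MyPlayer/1.0" -i http://example.com/video.mp4 -c copy saved.mp4
-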
-
-
2.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
2.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, that do not support the
-HTTP PUT method but the SOURCE method.
-
-
-
-
-
-
icecast://[username [:password ]@]server :port /mountpoint
-
-
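-
As an illustrative sketch, an MP3 stream could be published to a mountpoint as
-follows (the host, port, credentials and mountpoint are placeholders):
-
-
# stream an MP3 file in real time to an Icecast mountpoint
-ffmpeg -re -i input.mp3 -c:a copy -content_type audio/mpeg -f mp3 icecast://source:hackme@example.com:8000/stream.mp3
-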
-
-
2.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
2.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server [:port ][/app ][/playpath ]
-
-
-
-
2.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
2.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
-Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
-
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
-For example, to read from stdin with ffmpeg:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
-For writing to stdout with ffmpeg:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV), require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
2.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live and recorded.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
-For example, to read with ffplay a multimedia resource named
-"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
2.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
2.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
2.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
2.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
2.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
2.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
-The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set the timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
-
For more information see: http://www.samba.org/ .
-
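-
-As an illustrative sketch (domain, credentials, server and path are
-placeholders), a file on an SMB share could be played with:
-
-ffplay "smb://WORKGROUP:user:password@fileserver/share/videos/clip.mkv"
-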
-
-
2.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
-Allows reading from or writing to remote resources using the SFTP protocol.
-
-
-The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing the private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
2.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"–enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto ://server [:port ][/app ][/playpath ] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using
-ffmpeg
:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay
:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
2.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname [:port ][?option =val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
-
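-
-As a hedged example (the multicast address and port are placeholders), an
-MPEG-TS stream can be sent over RTP with a reduced packet size and a small TTL:
-
-ffmpeg -re -i input -f rtp_mpegts "rtp://239.0.0.1:5004?ttl=1&pkt_size=1316"
-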
-
2.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat, it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for a RTSP url is:
-
-
rtsp://hostname [:port ]/path
-
-
-
Options can be set on the ffmpeg
/ffplay
command
-line, or set in code via AVOption
s or in
-avformat_open_input
.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
-When watching multi-bitrate Real-RTSP streams with ffplay, the streams to
-display can be chosen with -vst n and -ast n for video and audio
-respectively, and can be switched on the fly by pressing v and a.
-
-
-
2.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay
and
-ffmpeg
tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
2.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat, it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
2.27.1 Muxer# TOC
-
-
The syntax for a SAP url given to the muxer is:
-
-
sap://destination [:port ][?options ]
-
-
-
The RTP packets are sent to destination on port port ,
-or to port 5004 if no port is specified.
-options is a &-separated list. The following options are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay
:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay
, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
2.27.2 Demuxer# TOC
-
-
The syntax for a SAP url given to the demuxer is:
-
-
sap://[address ][:port ]
-
-
-
address is the multicast address to listen for announcements on,
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port that is listened on, 9875 if omitted.
-
-
-The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
-ffplay sap://
-
-To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
2.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host :port [?options ]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. By default, an outgoing connection is made.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
-
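-
-As a rough sketch, not taken from the original manual (the address, port and
-stream count are placeholders), an MPEG-TS stream could be sent to a listening
-SCTP peer with:
-
-ffmpeg -re -i input -f mpegts "sctp://192.168.1.10:7777?max_streams=4"
-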
-
2.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
-
-
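-
-A sketch of typical usage (the base64 key material shown is only a
-placeholder and must be replaced by a real 30-byte key+salt encoding):
-
-ffmpeg -re -i input -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params <base64-key-and-salt> srtp://destination:5004
-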
-
2.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
2.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP url is:
-
-
tcp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set the timeout, expressed in microseconds, after which an error is raised.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
-The following example shows how to set up a listening TCP connection
-with ffmpeg, which is then accessed with ffplay:
-
-
ffmpeg -i input -f format tcp://hostname :port ?listen
-ffplay tcp://hostname :port
-
-
-
-
2.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL url is:
-
-
tls://hostname :port [?options ]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay
:
-
-
-
ffplay tls://hostname :port
-
-
-
-
2.33 udp# TOC
-
-
User Datagram Protocol.
-
-
The required syntax for an UDP URL is:
-
-
udp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set the timeout, expressed in microseconds, after which an error is raised.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
2.33.1 Examples# TOC
-
-
- Use ffmpeg
to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname :port
-
-
- Use ffmpeg
to stream in mpegts format over UDP using 188
-sized UDP packets, using a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg
to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address ]:port ...
-
-
-
-
-
2.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
-
-unix://filepath
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
-
-
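-
-As an illustrative sketch (the socket path is a placeholder), one ffmpeg
-process can listen on a Unix socket while another writes to it:
-
-ffmpeg -f mpegts -listen 1 -i unix:///tmp/ffmpeg.sock -c copy output.ts
-ffmpeg -i input -f mpegts unix:///tmp/ffmpeg.sock
-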
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavformat
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-resampler.html b/Externals/ffmpeg/dev/doc/ffmpeg-resampler.html
deleted file mode 100644
index 2611dfc1f0..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-resampler.html
+++ /dev/null
@@ -1,357 +0,0 @@
-
-
-
-
-
-
- FFmpeg Resampler Documentation
-
-
-
-
-
-
-
-
- FFmpeg Resampler Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The FFmpeg resampler provides a high-level interface to the
-libswresample library audio resampling utilities. In particular it
-allows one to perform audio resampling, audio channel layout rematrixing,
-and convert audio format and packing layout.
-
-
-
-
2 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
-
-
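-
-For instance, a hedged example (not part of the original text) of setting a
-few of these options on the aresample filter, resampling to 48 kHz with the
-soxr engine at 28-bit precision:
-
-ffmpeg -i input.wav -af aresample=osr=48000:resampler=soxr:precision=28 output.wav
-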
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libswresample
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-scaler.html b/Externals/ffmpeg/dev/doc/ffmpeg-scaler.html
deleted file mode 100644
index b7e57e3891..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-scaler.html
+++ /dev/null
@@ -1,231 +0,0 @@
-
-
-
-
-
-
- FFmpeg Scaler Documentation
-
-
-
-
-
-
-
-
- FFmpeg Scaler Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The FFmpeg rescaler provides a high-level interface to the libswscale
-library image conversion utilities. In particular it allows one to perform
-image rescaling and pixel format conversion.
-
-
-
-
2 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific of
-some scaling algorithms and ignored by others. The specified values
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
-
-
-
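-
-As a brief illustration (file names are placeholders), the scaling algorithm
-and extra flags can be selected in the FFmpeg tools with sws_flags, e.g.:
-
-ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd output.mp4
-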
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libswscale
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-utils.html b/Externals/ffmpeg/dev/doc/ffmpeg-utils.html
deleted file mode 100644
index 127e624d9d..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg-utils.html
+++ /dev/null
@@ -1,1468 +0,0 @@
-
-
-
-
-
-
- FFmpeg Utilities Documentation
-
-
-
-
-
-
-
-
- FFmpeg Utilities Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes some generic features and utilities provided
-by the libavutil library.
-
-
-
-
2 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
2.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- ' and \ are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a '\'.
-
- All characters enclosed between '' are included literally in the
-parsed string. The quote character ' itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
2.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour containing the ' special
-character:
-
-Crime d\'Amour
-
- The string above contains a quote, so the ' needs to be escaped
-when quoting it:
-
-'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
2.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
2.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
-[-][HH:]MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
-
-[-]S+[.m...]
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
2.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
-
-
2.4 Video size# TOC
-
Specify the size of the sourced video, it may be a string of the form
-width xheight , or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
-
-
2.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
-
-
-
2.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
2.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed by "0x" followed by an
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
-
-
2.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1 ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout
)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_*
macros in libavutil/channel_layout.h .
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
-
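-
-As a hedged example (file names are placeholders), a layout name from the
-tables above can be used wherever a channel layout is expected, for instance
-with the aformat filter:
-
-ffmpeg -i input.wav -af aformat=channel_layouts=5.1 output.wav
-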
-
3 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +
, -
,
-*
, /
, ^
.
-
-
The following unary operators are available: +
, -
.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and lesser than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is lesser than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is lesser than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated y , it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note, Variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
-For example the construct:
-
-if (A AND B) then C
-
-is equivalent to:
-
-if(A*B, C)
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
-
-
-
-
4 OpenCL Options
-
-
When FFmpeg is configured with --enable-opencl, it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench or
-av_opencl_get_device_list().
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench or
-av_opencl_get_device_list().
-
-
-
-
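-For example, a minimal sketch selecting the first platform and device for an
-OpenCL-capable filter (this assumes an --enable-opencl build and that the
-unsharp filter exposes an opencl option in this version):
-
ffmpeg -i input.mp4 -opencl_options platform_idx=0:device_idx=0 -vf "unsharp=opencl=1" output.mp4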
-
-
-
5 See Also
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavutil
-
-
-
-
6 Authors
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org.
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg.html b/Externals/ffmpeg/dev/doc/ffmpeg.html
deleted file mode 100644
index d7524f0917..0000000000
--- a/Externals/ffmpeg/dev/doc/ffmpeg.html
+++ /dev/null
@@ -1,2109 +0,0 @@
-
-
-
-
-
-
- ffmpeg Documentation
-
-
-
-
-
-
-
-
- ffmpeg Documentation
-
-
-
-
-
-
-
-
-
-
-
-
-
-
1 Synopsis
-
-
ffmpeg [global_options ] {[input_file_options ] -i input_file } ... {[output_file_options ] output_file } ...
-
-
-
2 Description
-
-
ffmpeg
is a very fast video and audio converter that can also grab from
-a live audio/video source. It can also convert between arbitrary sample
-rates and resize video on the fly with a high quality polyphase filter.
-
-
ffmpeg
reads from an arbitrary number of input "files" (which can be regular
-files, pipes, network streams, grabbing devices, etc.), specified by the
--i
option, and writes to an arbitrary number of output "files", which are
-specified by a plain output filename. Anything found on the command line which
-cannot be interpreted as an option is considered to be an output filename.
-
-
Each input or output file can, in principle, contain any number of streams of
-different types (video/audio/subtitle/attachment/data). The allowed number and/or
-types of streams may be limited by the container format. Selecting which
-streams from which inputs will go into which output is either done automatically
-or with the -map
option (see the Stream selection chapter).
-
-
To refer to input files in options, you must use their indices (0-based). E.g.
-the first input file is 0, the second is 1, etc. Similarly, streams
-within a file are referred to by their indices. E.g. 2:3 refers to the
-fourth stream in the third input file. Also see the Stream specifiers chapter.
-
-
As a general rule, options are applied to the next specified
-file. Therefore, order is important, and you can have the same
-option on the command line multiple times. Each occurrence is
-then applied to the next input or output file.
-Exceptions from this rule are the global options (e.g. verbosity level),
-which should be specified first.
-
-
Do not mix input and output files – first specify all input files, then all
-output files. Also do not mix options which belong to different files. All
-options apply ONLY to the next input or output file and are reset between files.
-
-
- To set the video bitrate of the output file to 64 kbit/s:
-
-
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
-
-
- To force the frame rate of the output file to 24 fps:
-
-
ffmpeg -i input.avi -r 24 output.avi
-
-
- To force the frame rate of the input file (valid for raw formats only)
-to 1 fps and the frame rate of the output file to 24 fps:
-
-
ffmpeg -r 1 -i input.m2v -r 24 output.avi
-
-
-
-
The format option may be needed for raw input files.
-
-
-
-
3 Detailed description
-
-
The transcoding process in ffmpeg
for each output can be described by
-the following diagram:
-
-
-
_______ ______________
-| | | |
-| input | demuxer | encoded data | decoder
-| file | ---------> | packets | -----+
-|_______| |______________| |
- v
- _________
- | |
- | decoded |
- | frames |
- |_________|
- ________ ______________ |
-| | | | |
-| output | <-------- | encoded data | <----+
-| file | muxer | packets | encoder
-|________| |______________|
-
-
-
-
-
ffmpeg
calls the libavformat library (containing demuxers) to read
-input files and get packets containing encoded data from them. When there are
-multiple input files, ffmpeg
tries to keep them synchronized by
-tracking lowest timestamp on any active input stream.
-
-
Encoded packets are then passed to the decoder (unless streamcopy is selected
-for the stream, see further for a description). The decoder produces
-uncompressed frames (raw video/PCM audio/...) which can be processed further by
-filtering (see next section). After filtering, the frames are passed to the
-encoder, which encodes them and outputs encoded packets. Finally those are
-passed to the muxer, which writes the encoded packets to the output file.
-
-
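-As an illustration, a single command exercising the whole pipeline (demux,
-decode, encode, mux) might look like the following sketch, assuming libx264
-and libmp3lame are enabled in this build:
-
ffmpeg -i input.avi -c:v libx264 -c:a libmp3lame output.mkv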
-
3.1 Filtering
-
Before encoding, ffmpeg
can process raw audio and video frames using
-filters from the libavfilter library. Several chained filters form a filter
-graph. ffmpeg
distinguishes between two types of filtergraphs:
-simple and complex.
-
-
-
3.1.1 Simple filtergraphs
-
Simple filtergraphs are those that have exactly one input and output, both of
-the same type. In the above diagram they can be represented by simply inserting
-an additional step between decoding and encoding:
-
-
-
_________ ______________
-| | | |
-| decoded | | encoded data |
-| frames |\ _ | packets |
-|_________| \ /||______________|
- \ __________ /
- simple _\|| | / encoder
- filtergraph | filtered |/
- | frames |
- |__________|
-
-
-
-
Simple filtergraphs are configured with the per-stream -filter option
-(with -vf and -af aliases for video and audio respectively).
-A simple filtergraph for video can look for example like this:
-
-
-
_______ _____________ _______ ________
-| | | | | | | |
-| input | ---> | deinterlace | ---> | scale | ---> | output |
-|_______| |_____________| |_______| |________|
-
-
-
-
Note that some filters change frame properties but not frame contents. E.g. the
-fps filter changes the number of frames, but does not
-touch the frame contents. Another example is the setpts filter, which
-only sets timestamps and otherwise passes the frames unchanged.
-
-
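-For example, a possible command for the deinterlace-and-scale chain shown
-above (a sketch, assuming the yadif and scale filters are available in this
-build):
-
ffmpeg -i input.mkv -vf "yadif,scale=1280:720" -c:a copy output.mkv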
-
3.1.2 Complex filtergraphs
-
Complex filtergraphs are those which cannot be described as simply a linear
-processing chain applied to one stream. This is the case, for example, when the graph has
-more than one input and/or output, or when output stream type is different from
-input. They can be represented with the following diagram:
-
-
-
_________
-| |
-| input 0 |\ __________
-|_________| \ | |
- \ _________ /| output 0 |
- \ | | / |__________|
- _________ \| complex | /
-| | | |/
-| input 1 |---->| filter |\
-|_________| | | \ __________
- /| graph | \ | |
- / | | \| output 1 |
- _________ / |_________| |__________|
-| | /
-| input 2 |/
-|_________|
-
-
-
-
Complex filtergraphs are configured with the -filter_complex option.
-Note that this option is global, since a complex filtergraph, by its nature,
-cannot be unambiguously associated with a single stream or file.
-
-
The -lavfi option is equivalent to -filter_complex .
-
-
A trivial example of a complex filtergraph is the overlay
filter, which
-has two video inputs and one video output, containing one video overlaid on top
-of the other. Its audio counterpart is the amix
filter.
-
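-For instance, a minimal sketch of such a graph, overlaying a logo image on a
-main video (file names are placeholders):
-
ffmpeg -i main.mkv -i logo.png -filter_complex "overlay=10:10" out.mkv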
-
-
3.2 Stream copy
-
Stream copy is a mode selected by supplying the copy
parameter to the
--codec option. It makes ffmpeg
omit the decoding and encoding
-step for the specified stream, so it does only demuxing and muxing. It is useful
-for changing the container format or modifying container-level metadata. The
-diagram above will, in this case, simplify to this:
-
-
-
_______ ______________ ________
-| | | | | |
-| input | demuxer | encoded data | muxer | output |
-| file | ---------> | packets | -------> | file |
-|_______| |______________| |________|
-
-
-
-
Since there is no decoding or encoding, it is very fast and there is no quality
-loss. However, it might not work in some cases because of many factors. Applying
-filters is obviously also impossible, since filters work on uncompressed data.
-
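-For example, to change the container from Matroska to MP4 without
-re-encoding (a sketch; it assumes the input codecs are allowed in MP4):
-
ffmpeg -i input.mkv -c copy output.mp4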
-
-
-
4 Stream selection
-
-
By default, ffmpeg
includes only one stream of each type (video, audio, subtitle)
-present in the input files and adds them to each output file. It picks the
-"best" of each based upon the following criteria: for video, it is the stream
-with the highest resolution, for audio, it is the stream with the most channels, for
-subtitles, it is the first subtitle stream. In the case where several streams of
-the same type rate equally, the stream with the lowest index is chosen.
-
-
You can disable some of those defaults by using the -vn/-an/-sn
options. For
-full manual control, use the -map
option, which disables the defaults just
-described.
-
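-For example, a simple sketch of the -an/-sn options, keeping the
-automatically selected video stream unchanged while dropping audio and
-subtitles:
-
ffmpeg -i input.mkv -an -sn -c:v copy video_only.mkv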
-
-
-
5 Options
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
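-For example, the following sketch uses such suffixes to request a 2 megabit
-video bitrate and to cap the output file size at 10 MiB:
-
ffmpeg -i input.mkv -b:v 2M -fs 10MiB output.mp4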
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
5.1 Stream specifiers
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of the following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg, matching by metadata will only work properly for
-input files.
-
-
-
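-As a combined illustration (a sketch only, assuming INPUT contains at least
-two audio streams), the following applies one option per stream type and one
-per individual audio stream:
-
ffmpeg -i INPUT -map 0 -b:v 1M -b:a:0 128k -b:a:1 64k OUTPUT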
-
-
5.2 Generic options
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr. If coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled by setting the environment variable
-AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced by setting
-the environment variable AV_LOG_FORCE_COLOR.
-The use of the environment variable NO_COLOR is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
5.3 AVOptions
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
5.4 Main options
-
-
--f fmt (input/output )
-Force input or output file format. The format is normally auto detected for input
-files and guessed from the file extension for output files, so this option is not
-needed in most cases.
-
-
--i filename (input )
-input file name
-
-
--y (global )
-Overwrite output files without asking.
-
-
--n (global )
-Do not overwrite output files, and exit immediately if a specified
-output file already exists.
-
-
--c[:stream_specifier ] codec (input/output,per-stream )
--codec[:stream_specifier ] codec (input/output,per-stream )
-Select an encoder (when used before an output file) or a decoder (when used
-before an input file) for one or more streams. codec is the name of a
-decoder/encoder or a special value copy
(output only) to indicate that
-the stream is not to be re-encoded.
-
-For example
-
-
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
-
-encodes all video streams with libx264 and copies all audio streams.
-
-For each stream, the last matching c
option is applied, so
-
-
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
-
-will copy all the streams except the second video, which will be encoded with
-libx264, and the 138th audio, which will be encoded with libvorbis.
-
-
--t duration (input/output )
-When used as an input option (before -i), limit the duration of
-data read from the input file.
-
-When used as an output option (before an output filename), stop writing the
-output after its duration reaches duration .
-
-duration may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--to position (output )
-Stop writing the output at position .
-position may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--fs limit_size (output )
-Set the file size limit, expressed in bytes.
-
-
--ss position (input/output )
-When used as an input option (before -i), seeks in this input file to
-position . Note that in most formats it is not possible to seek exactly, so
-ffmpeg
will seek to the closest seek point before position .
-When transcoding and -accurate_seek is enabled (the default), this
-extra segment between the seek point and position will be decoded and
-discarded. When doing stream copy or when -noaccurate_seek is used, it
-will be preserved.
-
-When used as an output option (before an output filename), decodes but discards
-input until the timestamps reach position .
-
-position may be either in seconds or in hh:mm:ss[.xxx]
form.
-
-
--itsoffset offset (input )
-Set the input time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added to the timestamps of the input files. Specifying
-a positive offset means that the corresponding streams are delayed by
-the time duration specified in offset .
-
-
--timestamp date (output )
-Set the recording timestamp in the container.
-
-date must be a time duration specification,
-see (ffmpeg-utils)the Date section in the ffmpeg-utils(1) manual .
-
-
--metadata[:metadata_specifier] key =value (output,per-metadata )
-Set a metadata key/value pair.
-
-An optional metadata_specifier may be given to set metadata
-on streams or chapters. See -map_metadata
documentation for
-details.
-
-This option overrides metadata set with -map_metadata
. It is
-also possible to delete metadata by using an empty value.
-
-For example, for setting the title in the output file:
-
-
ffmpeg -i in.avi -metadata title="my title" out.flv
-
-
-To set the language of the first audio stream:
-
-
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
-
-
-
--target type (output )
-Specify target file type (vcd, svcd, dvd, dv, dv50). type may be
-prefixed with pal-, ntsc- or film- to use the corresponding standard.
-All the format options
-(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
-
-
-
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
-
-
-Nevertheless you can specify additional options as long as you know
-they do not conflict with the standard, as in:
-
-
-
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
-
-
-
--dframes number (output )
-Set the number of data frames to output. This is an alias for -frames:d
.
-
-
--frames[:stream_specifier ] framecount (output,per-stream )
-Stop writing to the stream after framecount frames.
-
-
--q[:stream_specifier ] q (output,per-stream )
--qscale[:stream_specifier ] q (output,per-stream )
-Use fixed quality scale (VBR). The meaning of q /qscale is
-codec-dependent.
-If qscale is used without a stream_specifier then it applies only
-to the video stream. This is to maintain compatibility with previous behavior,
-and because specifying the same codec-specific value for two different codecs
-(audio and video) is generally not what is intended when no stream_specifier is
-used.
-
-
--filter[:stream_specifier ] filtergraph (output,per-stream )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single input and a single output of the
-same type as the stream. In the filtergraph, the input is associated
-to the label in, and the output to the label out. See
-the ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-See the -filter_complex option if you
-want to create filtergraphs with multiple inputs and/or outputs.
-
-
--filter_script[:stream_specifier ] filename (output,per-stream )
-This option is similar to -filter , the only difference is that its
-argument is the name of the file from which a filtergraph description is to be
-read.
-
-
--pre[:stream_specifier ] preset_name (output,per-stream )
-Specify the preset for matching stream(s).
-
-
--stats (global )
-Print encoding progress/statistics. It is on by default; to explicitly
-disable it you need to specify -nostats.
-
-
--progress url (global )
-Send program-friendly progress information to url .
-
-Progress information is written approximately every second and at the end of
-the encoding process. It is made of "key =value " lines. key
-consists of only alphanumeric characters. The last key of a sequence of
-progress information is always "progress".
-
-
--stdin
-Enable interaction on standard input. On by default unless standard input is
-used as an input. To explicitly disable interaction you need to specify
--nostdin.
-
-Disabling interaction on standard input is useful, for example, if
-ffmpeg is in the background process group. Roughly the same result can
-be achieved with ffmpeg ... < /dev/null
but it requires a
-shell.
-
-
--debug_ts (global )
-Print timestamp information. It is off by default. This option is
-mostly useful for testing and debugging purposes, and the output
-format may change from one version to another, so it should not be
-employed by portable scripts.
-
-See also the option -fdebug ts.
-
-
--attach filename (output )
-Add an attachment to the output file. This is supported by a few formats
-like Matroska for e.g. fonts used in rendering subtitles. Attachments
-are implemented as a specific type of stream, so this option will add
-a new stream to the file. It is then possible to use per-stream options
-on this stream in the usual way. Attachment streams created with this
-option will be created after all the other streams (i.e. those created
-with -map
or automatic mappings).
-
-Note that for Matroska you also have to set the mimetype metadata tag:
-
-
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
-
-(assuming that the attachment stream will be third in the output file).
-
-
--dump_attachment[:stream_specifier ] filename (input,per-stream )
-Extract the matching attachment stream into a file named filename . If
-filename is empty, then the value of the filename
metadata tag
-will be used.
-
-E.g. to extract the first attachment to a file named ’out.ttf’:
-
-
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
-
-To extract all attachments to files determined by the filename
tag:
-
-
ffmpeg -dump_attachment:t "" -i INPUT
-
-
-Technical note – attachments are implemented as codec extradata, so this
-option can actually be used to extract extradata from any stream, not just
-attachments.
-
-
-
-
-
-
5.5 Video Options
-
-
--vframes number (output )
-Set the number of video frames to output. This is an alias for -frames:v.
-
--r[:stream_specifier ] fps (input/output,per-stream )
-Set frame rate (Hz value, fraction or abbreviation).
-
-As an input option, ignore any timestamps stored in the file and instead
-generate timestamps assuming constant frame rate fps .
-This is not the same as the -framerate option used for some input formats
-like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
-If in doubt use -framerate instead of the input option -r .
-
-As an output option, duplicate or drop input frames to achieve constant output
-frame rate fps .
-
-
--s[:stream_specifier ] size (input/output,per-stream )
-Set frame size.
-
-As an input option, this is a shortcut for the video_size private
-option, recognized by some demuxers for which the frame size is either not
-stored in the file or is configurable – e.g. raw video or video grabbers.
-
-As an output option, this inserts the scale
video filter to the
-end of the corresponding filtergraph. Please use the scale
filter
-directly to insert it at the beginning or some other place.
-
-The format is ‘wxh ’ (default - same as source).
-
-
--aspect[:stream_specifier ] aspect (output,per-stream )
-Set the video display aspect ratio specified by aspect .
-
-aspect can be a floating point number string, or a string of the
-form num :den , where num and den are the
-numerator and denominator of the aspect ratio. For example "4:3",
-"16:9", "1.3333", and "1.7777" are valid argument values.
-
-If used together with -vcodec copy , it will affect the aspect ratio
-stored at container level, but not the aspect ratio stored in encoded
-frames, if it exists.
-
-
--vn (output )
-Disable video recording.
-
-
--vcodec codec (output )
-Set the video codec. This is an alias for -codec:v.
-
-
--pass[:stream_specifier ] n (output,per-stream )
-Select the pass number (1 or 2). It is used to do two-pass
-video encoding. The statistics of the video are recorded in the first
-pass into a log file (see also the option -passlogfile),
-and in the second pass that log file is used to generate the video
-at the exact requested bitrate.
-On pass 1, you may just deactivate audio and set output to null,
-examples for Windows and Unix:
-
-
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
-ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
-
-
-
--passlogfile[:stream_specifier ] prefix (output,per-stream )
-Set two-pass log file name prefix to prefix , the default file name
-prefix is “ffmpeg2pass”. The complete file name will be
-PREFIX-N.log , where N is a number specific to the output
-stream.
-
-
--vf filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:v, see the -filter option.
-
-
-
-
-
5.6 Advanced Video options
-
-
--pix_fmt[:stream_specifier ] format (input/output,per-stream )
-Set pixel format. Use -pix_fmts
to show all the supported
-pixel formats.
-If the requested pixel format can not be selected, ffmpeg will print a
-warning and select the best pixel format supported by the encoder.
-If pix_fmt is prefixed by a +, ffmpeg will exit with an error
-if the requested pixel format can not be selected, and automatic conversions
-inside filtergraphs are disabled.
-If pix_fmt is a single +, ffmpeg selects the same pixel format
-as the input (or graph output) and automatic conversions are disabled.
-
-
--sws_flags flags (input/output )
-Set SwScaler flags.
-
--vdt n
-Discard threshold.
-
-
--rc_override[:stream_specifier ] override (output,per-stream )
-Rate control override for specific intervals, formatted as "int,int,int"
-list separated with slashes. The first two values are the beginning and
-end frame numbers, the last one is the quantizer to use if positive, or quality
-factor if negative.
-
-
--ilme
-Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
-Use this option if your input file is interlaced and you want
-to keep the interlaced format for minimum losses.
-The alternative is to deinterlace the input stream with
--deinterlace , but deinterlacing introduces losses.
-
--psnr
-Calculate PSNR of compressed frames.
-
--vstats
-Dump video coding statistics to vstats_HHMMSS.log .
-
--vstats_file file
-Dump video coding statistics to file .
-
--top[:stream_specifier ] n (output,per-stream )
-top=1/bottom=0/auto=-1 field first
-
--dc precision
-Intra_dc_precision.
-
--vtag fourcc/tag (output )
-Force video tag/fourcc. This is an alias for -tag:v.
-
--qphist (global )
-Show QP histogram
-
--vbsf bitstream_filter
-Deprecated see -bsf
-
-
--force_key_frames[:stream_specifier ] time [,time ...] (output,per-stream )
--force_key_frames[:stream_specifier ] expr:expr (output,per-stream )
-Force key frames at the specified timestamps, more precisely at the first
-frames after each specified time.
-
-If the argument is prefixed with expr:, the string expr
-is interpreted as an expression and is evaluated for each frame. A
-key frame is forced in case the evaluation is non-zero.
-
-If one of the times is "chapters[delta ]", it is expanded into
-the time of the beginning of all chapters in the file, shifted by
-delta , expressed as a time in seconds.
-This option can be useful to ensure that a seek point is present at a
-chapter mark or any other designated place in the output file.
-
-For example, to insert a key frame at 5 minutes, plus key frames 0.1 second
-before the beginning of every chapter:
-
-
-force_key_frames 0:05:00,chapters-0.1
-
-
-The expression in expr can contain the following constants:
-
-n
-the number of the currently processed frame, starting from 0
-
-n_forced
-the number of forced frames
-
-prev_forced_n
-the number of the previous forced frame, it is NAN
when no
-keyframe was forced yet
-
-prev_forced_t
-the time of the previous forced frame, it is NAN
when no
-keyframe was forced yet
-
-t
-the time of the currently processed frame
-
-
-
-For example to force a key frame every 5 seconds, you can specify:
-
-
-force_key_frames expr:gte(t,n_forced*5)
-
-
-To force a key frame 5 seconds after the time of the last forced one,
-starting from second 13:
-
-
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
-
-
-Note that forcing too many keyframes is very harmful for the lookahead
-algorithms of certain encoders: using fixed-GOP options or similar
-would be more efficient.
-
-
--copyinkf[:stream_specifier ] (output,per-stream )
-When doing stream copy, copy also non-key frames found at the
-beginning.
-
-
--hwaccel[:stream_specifier ] hwaccel (input,per-stream )
-Use hardware acceleration to decode the matching stream(s). The allowed values
-of hwaccel are:
-
-none
-Do not use any hardware acceleration (the default).
-
-
-auto
-Automatically select the hardware acceleration method.
-
-
-vda
-Use Apple VDA hardware acceleration.
-
-
-vdpau
-Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
-
-
-dxva2
-Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
-
-
-
-This option has no effect if the selected hwaccel is not available or not
-supported by the chosen decoder.
-
-Note that most acceleration methods are intended for playback and will not be
-faster than software decoding on modern CPUs. Additionally, ffmpeg
-will usually need to copy the decoded frames from the GPU memory into the system
-memory, resulting in further performance loss. This option is thus mainly
-useful for testing.
-
-
--hwaccel_device[:stream_specifier ] hwaccel_device (input,per-stream )
-Select a device to use for hardware acceleration.
-
-This option only makes sense when the -hwaccel option is also
-specified. Its exact meaning depends on the specific hardware acceleration
-method chosen.
-
-
-vdpau
-For VDPAU, this option specifies the X11 display/screen to use. If this option
-is not specified, the value of the DISPLAY environment variable is used
-
-
-dxva2
-For DXVA2, this option should contain the number of the display adapter to use.
-If this option is not specified, the default adapter is used.
-
-
-
-
-
-
-
5.7 Audio Options
-
-
--aframes number (output )
-Set the number of audio frames to output. This is an alias for -frames:a.
-
--ar[:stream_specifier ] freq (input/output,per-stream )
-Set the audio sampling frequency. For output streams it is set by
-default to the frequency of the corresponding input stream. For input
-streams this option only makes sense for audio grabbing devices and raw
-demuxers and is mapped to the corresponding demuxer options.
-
--aq q (output )
-Set the audio quality (codec-specific, VBR). This is an alias for -q:a.
-
--ac[:stream_specifier ] channels (input/output,per-stream )
-Set the number of audio channels. For output streams it is set by
-default to the number of input audio channels. For input streams
-this option only makes sense for audio grabbing devices and raw demuxers
-and is mapped to the corresponding demuxer options.
-
--an (output )
-Disable audio recording.
-
--acodec codec (input/output )
-Set the audio codec. This is an alias for -codec:a.
-
--sample_fmt[:stream_specifier ] sample_fmt (output,per-stream )
-Set the audio sample format. Use -sample_fmts to get a list
-of supported sample formats.
-
-
--af filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:a, see the -filter option.
-
-
-
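-For example, the following sketch combines several of these options to
-produce a stereo, 48 kHz MP3 file (it assumes libmp3lame is enabled in this
-build):
-
ffmpeg -i input.mov -vn -ar 48000 -ac 2 -c:a libmp3lame -q:a 2 output.mp3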
-
-
5.8 Advanced Audio options
-
-
--atag fourcc/tag (output )
-Force audio tag/fourcc. This is an alias for -tag:a.
-
--absf bitstream_filter
-Deprecated, see -bsf
-
--guess_layout_max channels (input,per-stream )
-If some input channel layout is not known, try to guess only if it
-corresponds to at most the specified number of channels. For example, 2
-tells ffmpeg to recognize 1 channel as mono and 2 channels as
-stereo but not 6 channels as 5.1. The default is to always try to guess. Use
-0 to disable all guessing.
-
-
-
-
-
5.9 Subtitle options
-
-
--scodec codec (input/output )
-Set the subtitle codec. This is an alias for -codec:s.
-
--sn (output )
-Disable subtitle recording.
-
--sbsf bitstream_filter
-Deprecated, see -bsf
-
-
-
-
-
5.10 Advanced Subtitle options
-
-
--fix_sub_duration
-Fix subtitles durations. For each subtitle, wait for the next packet in the
-same stream and adjust the duration of the first to avoid overlap. This is
-necessary with some subtitles codecs, especially DVB subtitles, because the
-duration in the original packet is only a rough estimate and the end is
-actually marked by an empty subtitle frame. Failing to use this option when
-necessary can result in exaggerated durations or muxing failures due to
-non-monotonic timestamps.
-
-Note that this option will delay the output of all data until the next
-subtitle packet is decoded: it may increase memory consumption and latency a
-lot.
-
-
--canvas_size size
-Set the size of the canvas used to render subtitles.
-
-
-
-
-
-
5.11 Advanced options
-
-
--map [-]input_file_id [:stream_specifier ][,sync_file_id [:stream_specifier ]] | [linklabel] (output )
-
-Designate one or more input streams as a source for the output file. Each input
-stream is identified by the input file index input_file_id and
-the input stream index input_stream_id within the input
-file. Both indices start at 0. If specified,
-sync_file_id :stream_specifier sets which input stream
-is used as a presentation sync reference.
-
-The first -map
option on the command line specifies the
-source for output stream 0, the second -map
option specifies
-the source for output stream 1, etc.
-
-A -
character before the stream identifier creates a "negative" mapping.
-It disables matching streams from already created mappings.
-
-An alternative [linklabel] form will map outputs from complex filter
-graphs (see the -filter_complex option) to the output file.
-linklabel must correspond to a defined output link label in the graph.
-
-For example, to map ALL streams from the first input file to output
-
-
ffmpeg -i INPUT -map 0 output
-
-
-For example, if you have two audio streams in the first input file,
-these streams are identified by "0:0" and "0:1". You can use
--map
to select which streams to place in an output file. For
-example:
-
-
ffmpeg -i INPUT -map 0:1 out.wav
-
-will map the input stream in INPUT identified by "0:1" to
-the (single) output stream in out.wav .
-
-For example, to select the stream with index 2 from input file
-a.mov (specified by the identifier "0:2"), and stream with
-index 6 from input b.mov (specified by the identifier "1:6"),
-and copy them to the output file out.mov :
-
-
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
-
-
-To select all video and the third audio stream from an input file:
-
-
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
-
-
-To map all the streams except the second audio, use negative mappings
-
-
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
-
-
-To pick the English audio stream:
-
-
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
-
-
-Note that using this option disables the default mappings for this output file.
-
-
--map_channel [input_file_id .stream_specifier .channel_id |-1][:output_file_id .stream_specifier ]
-Map an audio channel from a given input to an output. If
-output_file_id .stream_specifier is not set, the audio channel will
-be mapped on all the audio streams.
-
-Using "-1" instead of
-input_file_id .stream_specifier .channel_id will map a muted
-channel.
-
-For example, assuming INPUT is a stereo audio file, you can switch the
-two audio channels with the following command:
-
-
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
-
-
-If you want to mute the first channel and keep the second:
-
-
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
-
-
-The order of the "-map_channel" option specifies the order of the channels in
-the output stream. The output channel layout is guessed from the number of
-channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
-in combination with "-map_channel" causes the channel gain levels to be updated if
-input and output channel layouts don’t match (for instance two "-map_channel"
-options and "-ac 6").
-
-You can also extract each channel of an input to specific outputs; the following
-command extracts two channels of the INPUT audio stream (file 0, stream 0)
-to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs:
-
-
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
-
-
-The following example splits the channels of a stereo input into two separate
-streams, which are put into the same output file:
-
-
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
-
-
-Note that currently each output stream can only contain channels from a single
-input stream; you can’t for example use "-map_channel" to pick multiple input
-audio channels contained in different streams (from the same or different files)
-and merge them into a single output stream. It is therefore not currently
-possible, for example, to turn two separate mono streams into a single stereo
-stream. However splitting a stereo stream into two single channel mono streams
-is possible.
-
-If you need this feature, a possible workaround is to use the amerge
-filter. For example, if you need to merge a media (here input.mkv ) with 2
-mono audio streams into one single stereo channel audio stream (and keep the
-video stream), you can use the following command:
-
-
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
-
-
-
--map_metadata[:metadata_spec_out ] infile [:metadata_spec_in ] (output,per-metadata )
-Set metadata information of the next output file from infile . Note that
-those are file indices (zero-based), not filenames.
-Optional metadata_spec_in/out parameters specify, which metadata to copy.
-A metadata specifier can have the following forms:
-
-g
-global metadata, i.e. metadata that applies to the whole file
-
-
-s [:stream_spec ]
-per-stream metadata. stream_spec is a stream specifier as described
-in the Stream specifiers chapter. In an input metadata specifier, the first
-matching stream is copied from. In an output metadata specifier, all matching
-streams are copied to.
-
-
-c :chapter_index
-per-chapter metadata. chapter_index is the zero-based chapter index.
-
-
-p :program_index
-per-program metadata. program_index is the zero-based program index.
-
-
-If metadata specifier is omitted, it defaults to global.
-
-By default, global metadata is copied from the first input file,
-per-stream and per-chapter metadata is copied along with streams/chapters. These
-default mappings are disabled by creating any mapping of the relevant type. A negative
-file index can be used to create a dummy mapping that just disables automatic copying.
-
-For example to copy metadata from the first stream of the input file to global metadata
-of the output file:
-
-
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
-
-
-To do the reverse, i.e. copy global metadata to all audio streams:
-
-
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
-
-Note that simple 0
would work as well in this example, since global
-metadata is assumed by default.
-
-
--map_chapters input_file_index (output )
-Copy chapters from input file with index input_file_index to the next
-output file. If no chapter mapping is specified, then chapters are copied from
-the first input file with at least one chapter. Use a negative file index to
-disable any chapter copying.
-
-
--benchmark (global )
-Show benchmarking information at the end of an encode.
-Shows CPU time used and maximum memory consumption.
-Maximum memory consumption is not supported on all systems,
-it will usually display as 0 if not supported.
-
--benchmark_all (global )
-Show benchmarking information during the encode.
-Shows CPU time used in various steps (audio/video encode/decode).
-
--timelimit duration (global )
-Exit after ffmpeg has been running for duration seconds.
-
--dump (global )
-Dump each input packet to stderr.
-
--hex (global )
-When dumping packets, also dump the payload.
-
--re (input )
-Read input at native frame rate. Mainly used to simulate a grab device
-or live input stream (e.g. when reading from a file). Should not be used
-with actual grab devices or live input streams (where it can cause packet
-loss).
-By default ffmpeg
attempts to read the input(s) as fast as possible.
-This option will slow down the reading of the input(s) to the native frame rate
-of the input(s). It is useful for real-time output (e.g. live streaming).
-
--loop_input
-Loop over the input stream. Currently it works only for image
-streams. This option is used for automatic FFserver testing.
-This option is deprecated, use -loop 1.
-
--loop_output number_of_times
-Repeatedly loop output for formats that support looping such as animated GIF
-(0 will loop the output infinitely).
-This option is deprecated, use -loop.
-
--vsync parameter
-Video sync method.
-For compatibility reasons old values can be specified as numbers.
-Newly added values will have to be specified as strings always.
-
-
-0, passthrough
-Each frame is passed with its timestamp from the demuxer to the muxer.
-
-1, cfr
-Frames will be duplicated and dropped to achieve exactly the requested
-constant frame rate.
-
-2, vfr
-Frames are passed through with their timestamp or dropped so as to
-prevent 2 frames from having the same timestamp.
-
-drop
-As passthrough but destroys all timestamps, making the muxer generate
-fresh timestamps based on frame-rate.
-
--1, auto
-Chooses between 1 and 2 depending on muxer capabilities. This is the
-default method.
-
-
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-With -map you can select from which stream the timestamps should be
-taken. You can leave either video or audio unchanged and sync the
-remaining stream(s) to the unchanged one.
-
-
--async samples_per_second
-Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
-the parameter is the maximum samples per second by which the audio is changed.
--async 1 is a special case where only the start of the audio stream is corrected
-without any later correction.
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-This option has been deprecated. Use the aresample
audio filter instead.
-
-
--copyts
-Do not process input timestamps, but keep their values without trying
-to sanitize them. In particular, do not remove the initial start time
-offset value.
-
-Note that, depending on the vsync option or on specific muxer
-processing (e.g. in case the format option avoid_negative_ts
-is enabled) the output timestamps may mismatch with the input
-timestamps even when this option is selected.
-
-
--start_at_zero
-When used with copyts , shift input timestamps so they start at zero.
-
-This means that using e.g. -ss 50
will make output timestamps start at
-50 seconds, regardless of what timestamp the input file started at.
-
-
--copytb mode
-Specify how to set the encoder timebase when stream copying. mode is an
-integer numeric value, and can assume one of the following values:
-
-
-1
-Use the demuxer timebase.
-
-The time base is copied to the output encoder from the corresponding input
-demuxer. This is sometimes required to avoid non monotonically increasing
-timestamps when copying video streams with variable frame rate.
-
-
-0
-Use the decoder timebase.
-
-The time base is copied to the output encoder from the corresponding input
-decoder.
-
-
--1
-Try to make the choice automatically, in order to generate a sane output.
-
-
-
-Default value is -1.
-
-
--shortest (output )
-Finish encoding when the shortest input stream ends.
-
--dts_delta_threshold
-Timestamp discontinuity delta threshold.
-
--muxdelay seconds (input )
-Set the maximum demux-decode delay.
-
--muxpreload seconds (input )
-Set the initial demux-decode delay.
-
--streamid output-stream-index :new-value (output )
-Assign a new stream-id value to an output stream. This option should be
-specified prior to the output filename to which it applies.
-For the situation where multiple output files exist, a streamid
-may be reassigned to a different value.
-
-For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
-an output mpegts file:
-
-
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
-
-
-
--bsf[:stream_specifier ] bitstream_filters (output,per-stream )
-Set bitstream filters for matching streams. bitstream_filters is
-a comma-separated list of bitstream filters. Use the -bsfs
option
-to get the list of bitstream filters.
-
-
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
-
-
-
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
-
-
-
--tag[:stream_specifier ] codec_tag (input/output,per-stream )
-Force a tag/fourcc for matching streams.
-
-
--timecode hh :mm :ss SEPff
-Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’
-(or ’.’) for drop.
-
-
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
-
-
-
--filter_complex filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. For simple graphs – those with one input and one output of the same
-type – see the -filter options. filtergraph is a description of
-the filtergraph, as described in the “Filtergraph syntax” section of the
-ffmpeg-filters manual.
-
-Input link labels must refer to input streams using the
-[file_index:stream_specifier]
syntax (i.e. the same as -map
-uses). If stream_specifier matches multiple streams, the first one will be
-used. An unlabeled input will be connected to the first unused input stream of
-the matching type.
-
-Output link labels are referred to with -map . Unlabeled outputs are
-added to the first output file.
-
-Note that with this option it is possible to use only lavfi sources without
-normal input files.
-
-For example, to overlay an image over video
-
-
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
-'[out]' out.mkv
-
-Here [0:v]
refers to the first video stream in the first input file,
-which is linked to the first (main) input of the overlay filter. Similarly the
-first video stream in the second input is linked to the second (overlay) input
-of overlay.
-
-Assuming there is only one video stream in each input file, we can omit input
-labels, so the above is equivalent to
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
-'[out]' out.mkv
-
-
-Furthermore we can omit the output label and the single output from the filter
-graph will be added to the output file automatically, so we can simply write
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
-
-
-To generate 5 seconds of pure red video using lavfi color
source:
-
-
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
-
-
-
--lavfi filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. Equivalent to -filter_complex .
-
-
--filter_complex_script filename (global )
-This option is similar to -filter_complex , the only difference is that
-its argument is the name of the file from which a complex filtergraph
-description is to be read.
-
-
--accurate_seek (input )
-This option enables or disables accurate seeking in input files with the
--ss option. It is enabled by default, so seeking is accurate when
-transcoding. Use -noaccurate_seek to disable it, which may be useful
-e.g. when copying some streams and transcoding the others.
-
-
--override_ffserver (global )
-Overrides the input specifications from ffserver. Using this
-option you can map any input stream to ffserver and control
-many aspects of the encoding from ffmpeg. Without this
-option ffmpeg will transmit to ffserver what is
-requested by ffserver.
-
-The option is intended for cases where features are needed that cannot be
-specified to ffserver but can be to ffmpeg.
-
-
--sdp_file file (global )
-Print sdp information to file .
-This allows dumping sdp information when at least one output isn’t an
-rtp stream.
-
-
--discard (input )
-Allows discarding specific streams or frames of streams at the demuxer.
-Not all demuxers support this.
-
-
-none
-Discard no frame.
-
-
-default
-Default, which discards no frames.
-
-
-noref
-Discard all non-reference frames.
-
-
-bidir
-Discard all bidirectional frames.
-
-
-nokey
-Discard all frames except keyframes.
-
-
-all
-Discard all frames.
-
-
-
-
-
-
-
As a special exception, you can use a bitmap subtitle stream as input: it
-will be converted into a video with the same size as the largest video in
-the file, or 720x576 if no video is present. Note that this is an
-experimental and temporary solution. It will be removed once libavfilter has
-proper support for subtitles.
-
-
For example, to hardcode subtitles on top of a DVB-T recording stored in
-MPEG-TS format, delaying the subtitles by 1 second:
-
-
ffmpeg -i input.ts -filter_complex \
- '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
- -sn -map '#0x2dc' output.mkv
-
-
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
-audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
-
-
-
5.12 Preset files
-
A preset file contains a sequence of option =value pairs,
-one for each line, specifying a sequence of options which would be
-awkward to specify on the command line. Lines starting with the hash
-(’#’) character are ignored and are used to provide comments. Check
-the presets directory in the FFmpeg source tree for examples.
-
-
There are two types of preset files: ffpreset and avpreset files.
-
-
-
5.12.1 ffpreset files# TOC
-
ffpreset files are specified with the vpre, apre,
-spre, and fpre options. The fpre option takes the
-filename of the preset instead of a preset name as input and can be
-used for any kind of codec. For the vpre, apre, and
-spre options, the options specified in a preset file are
-applied to the currently selected codec of the same type as the preset
-option.
-
-
The argument passed to the vpre, apre, and spre
-preset options identifies the preset file to use according to the
-following rules:
-
-
First ffmpeg searches for a file named arg .ffpreset in the
-directories $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg , and in
-the datadir defined at configuration time (usually PREFIX/share/ffmpeg )
-or in a ffpresets folder along the executable on win32,
-in that order. For example, if the argument is libvpx-1080p, it will
-search for the file libvpx-1080p.ffpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-codec_name -arg .ffpreset in the above-mentioned
-directories, where codec_name is the name of the codec to which
-the preset file options will be applied. For example, if you select
-the video codec with -vcodec libvpx and use -vpre 1080p,
-then it will search for the file libvpx-1080p.ffpreset .
-
-
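-As a sketch (the file name and values below are purely illustrative), a
-libvpx-1080p.ffpreset placed in one of the directories above could contain
-plain option=value lines:
-
b=2M
-g=120
-qmin=4
-qmax=48
-
-and would then be applied with something like:
-
ffmpeg -i input.mkv -vcodec libvpx -vpre 1080p output.webm
-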
-
5.12.2 avpreset files# TOC
-
avpreset files are specified with the pre option. They work similarly to
-ffpreset files, but they only allow encoder-specific options. Therefore, an
-option =value pair specifying an encoder cannot be used.
-
-
When the pre option is specified, ffmpeg will look for files with the
-suffix .avpreset in the directories $AVCONV_DATADIR (if set), and
-$HOME/.avconv , and in the datadir defined at configuration time (usually
-PREFIX/share/ffmpeg ), in that order.
-
-
First ffmpeg searches for a file named codec_name -arg .avpreset in
-the above-mentioned directories, where codec_name is the name of the codec
-to which the preset file options will be applied. For example, if you select the
-video codec with -vcodec libvpx and use -pre 1080p, then it will
-search for the file libvpx-1080p.avpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-arg .avpreset in the same directories.
-
-
-
-
-
-
- For streaming at very low bitrates, use a low frame rate
-and a small GOP size. This is especially true for RealVideo where
-the Linux player does not seem to be very fast, so it can miss
-frames. An example is:
-
-
-
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
-
-
- The parameter ’q’ which is displayed while encoding is the current
-quantizer. The value 1 indicates that a very good quality could
-be achieved. The value 31 indicates the worst quality. If q=31 appears
-too often, it means that the encoder cannot compress enough to meet
-your bitrate. You must either increase the bitrate, decrease the
-frame rate or decrease the frame size.
-
- If your computer is not fast enough, you can speed up the
-compression at the expense of the compression ratio. You can use
-’-me zero’ to speed up motion estimation, and ’-g 0’ to disable
-motion estimation completely (you have only I-frames, which means it
-is about as good as JPEG compression).
-
- To have very low audio bitrates, reduce the sampling frequency
-(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
-
- To have a constant quality (but a variable bitrate), use the option
-’-qscale n’ where ’n’ is between 1 (excellent quality) and 31 (worst
-quality).
-
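-For example (file names are arbitrary), a mid-range fixed quantizer:
-
ffmpeg -i input.avi -qscale:v 5 output.mpg
-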
-
-
-
-
7 Examples# TOC
-
-
-
7.1 Video and Audio grabbing# TOC
-
-
If you specify the input format and device then ffmpeg can grab video
-and audio directly.
-
-
-
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Or with an ALSA audio source (mono input, card id 1) instead of OSS:
-
-
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Note that you must activate the right video source and channel before
-launching ffmpeg with any TV viewer such as
-xawtv by Gerd Knorr. You also
-have to set the audio recording levels correctly with a
-standard mixer.
-
-
-
7.2 X11 grabbing# TOC
-
-
Grab the X11 display with ffmpeg via
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as
-the DISPLAY environment variable.
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
-variable. 10 is the x-offset and 20 the y-offset for the grabbing.
-
-
-
7.3 Video and Audio file format conversion# TOC
-
-
Any supported file format and protocol can serve as input to ffmpeg:
-
-
Examples:
-
- You can use YUV files as input:
-
-
-
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
-
-
-It will use the files:
-
-
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
-/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
-
-
-The Y files use twice the resolution of the U and V files. They are
-raw files, without header. They can be generated by all decent video
-decoders. You must specify the size of the image with the -s option
-if ffmpeg cannot guess it.
-
- You can input from a raw YUV420P file:
-
-
-
ffmpeg -i /tmp/test.yuv /tmp/out.avi
-
-
-test.yuv is a file containing raw YUV planar data. Each frame is composed
-of the Y plane followed by the U and V planes at half vertical and
-horizontal resolution.
-
- You can output to a raw YUV420P file:
-
-
-
ffmpeg -i mydivx.avi hugefile.yuv
-
-
- You can set several input files and output files:
-
-
-
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
-
-
-Converts the audio file a.wav and the raw YUV video file a.yuv
-to MPEG file a.mpg.
-
- You can also do audio and video conversions at the same time:
-
-
-
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
-
-
-Converts a.wav to MPEG audio at 22050 Hz sample rate.
-
- You can encode to several formats at the same time and define a
-mapping from input stream to output streams:
-
-
-
ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
-
-
-Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map
-file:index’ specifies which input stream is used for each output
-stream, in the order of the definition of output streams.
-
- You can transcode decrypted VOBs:
-
-
-
ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
-
-
-This is a typical DVD ripping example; the input is a VOB file, the
-output an AVI file with MPEG-4 video and MP3 audio. Note that in this
-command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
-GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
-input video. Furthermore, the audio stream is MP3-encoded so you need
-to enable LAME support by passing --enable-libmp3lame
to configure.
-The mapping is particularly useful for DVD transcoding
-to get the desired audio language.
-
-NOTE: To see the supported input formats, use ffmpeg -formats
.
-
- You can extract images from a video, or create a video from many images:
-
-For extracting images from a video:
-
-
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
-
-
-This will extract one video frame per second from the video and will
-output them in files named foo-001.jpeg , foo-002.jpeg ,
-etc. Images will be rescaled to fit the new WxH values.
-
-If you want to extract just a limited number of frames, you can use the
-above command in combination with the -vframes or -t option, or in
-combination with -ss to start extracting from a certain point in time.
-
-For creating a video from many images:
-
-
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
-
-
-The syntax foo-%03d.jpeg
specifies to use a decimal number
-composed of three digits padded with zeroes to express the sequence
-number. It is the same syntax supported by the C printf function, but
-only formats accepting a normal integer are suitable.
-
-When importing an image sequence, -i also supports expanding
-shell-like wildcard patterns (globbing) internally, by selecting the
-image2-specific -pattern_type glob
option.
-
-For example, for creating a video from filenames matching the glob pattern
-foo-*.jpeg
:
-
-
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
-
-
- You can put many streams of the same type in the output:
-
-
-
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
-
-
-The resulting output file test12.nut will contain the first four streams
-from the input files in reverse order.
-
- To force CBR video output:
-
-
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
-
-
- The four options lmin, lmax, mblmin and mblmax use ’lambda’ units,
-but you may use the QP2LAMBDA constant to easily convert from ’q’ units:
-
-
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
-
-
-
-
-
-
-
8 See Also# TOC
-
-
ffmpeg-all ,
-ffplay , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
9 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffplay-all.html b/Externals/ffmpeg/dev/doc/ffplay-all.html
deleted file mode 100644
index 1264b50ebd..0000000000
--- a/Externals/ffmpeg/dev/doc/ffplay-all.html
+++ /dev/null
@@ -1,21308 +0,0 @@
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffplay [options ] [input_file ]
-
-
-
2 Description# TOC
-
-
FFplay is a very simple and portable media player using the FFmpeg
-libraries and the SDL library. It is mostly used as a testbed for the
-various FFmpeg APIs.
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
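-As an illustration of the metadata form (the stream layout of input.mkv is
-assumed), the following keeps only the streams tagged as English, without
-reencoding:
-
ffmpeg -i input.mkv -map 0:m:language:eng -c copy english_only.mkv
-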
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter named filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process
-absolutely cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
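-For example (the input name is arbitrary), to play a file with verbose output
-while keeping repeated messages visible:
-
ffplay -loglevel repeat+verbose input.mkv
-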
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--x width
-Force displayed width.
-
--y height
-Force displayed height.
-
--s size
-Set frame size (WxH or abbreviation), needed for videos which do
-not contain a header with the frame size like raw YUV. This option
-has been deprecated in favor of private options, try -video_size.
-
--fs
-Start in fullscreen mode.
-
--an
-Disable audio.
-
--vn
-Disable video.
-
--sn
-Disable subtitles.
-
--ss pos
-Seek to a given position in seconds.
-
--t duration
-Play <duration> seconds of audio/video.
-
--bytes
-Seek by bytes.
-
--nodisp
-Disable graphical display.
-
--f fmt
-Force format.
-
--window_title title
-Set window title (default is the input filename).
-
--loop number
-Loops movie playback <number> times. 0 means forever.
-
--showmode mode
-Set the show mode to use.
-Available values for mode are:
-
-‘0, video ’
-show video
-
-‘1, waves ’
-show audio waves
-
-‘2, rdft ’
-show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
-
-
-
-Default value is "video"; if video is not present or cannot be played,
-"rdft" is automatically selected.
-
-You can interactively cycle through the available show modes by
-pressing the key w .
-
-
--vf filtergraph
-Create the filtergraph specified by filtergraph and use it to
-filter the video stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single video input and a single video
-output. In the filtergraph, the input is associated to the label
-in
, and the output to the label out
. See the
-ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-You can specify this parameter multiple times and cycle through the specified
-filtergraphs along with the show modes by pressing the key w .
-
-
--af filtergraph
-filtergraph is a description of the filtergraph to apply to
-the input audio.
-Use the option "-filters" to show all the available filters (including
-sources and sinks).
-
-
--i input_file
-Read input_file .
-
-
-
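-A small combined sketch (the file name is assumed): start in fullscreen, skip
-the audio, and play ten seconds starting at the 30 second mark:
-
ffplay -fs -an -ss 30 -t 10 input.mkv
-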
-
-
3.5 Advanced options# TOC
-
--pix_fmt format
-Set pixel format.
-This option has been deprecated in favor of private options, try -pixel_format.
-
-
--stats
-Print several playback statistics, in particular show the stream
-duration, the codec parameters, the current position in the stream and
-the audio/video synchronisation drift. It is on by default, to
-explicitly disable it you need to specify -nostats
.
-
-
--fast
-Non-spec-compliant optimizations.
-
--genpts
-Generate pts.
-
--sync type
-Set the master clock to audio (type=audio
), video
-(type=video
) or external (type=ext
). Default is audio. The
-master clock is used to control audio-video synchronization. Most media
-players use audio as master clock, but in some cases (streaming or high
-quality broadcast) it is necessary to change that. This option is mainly
-used for debugging purposes.
-
--ast audio_stream_specifier
-Select the desired audio stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" audio stream is selected in the program of the
-already selected video stream.
-
--vst video_stream_specifier
-Select the desired video stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" video stream is selected.
-
--sst subtitle_stream_specifier
-Select the desired subtitle stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" subtitle stream is selected in the program of the
-already selected video or audio stream.
-
--autoexit
-Exit when video is done playing.
-
--exitonkeydown
-Exit if any key is pressed.
-
--exitonmousedown
-Exit if any mouse button is pressed.
-
-
--codec:media_specifier codec_name
-Force a specific decoder implementation for the stream identified by
-media_specifier , which can assume the values a (audio),
-v (video), and s (subtitle).
-
-
--acodec codec_name
-Force a specific audio decoder.
-
-
--vcodec codec_name
-Force a specific video decoder.
-
-
--scodec codec_name
-Force a specific subtitle decoder.
-
-
--autorotate
-Automatically rotate the video according to presentation metadata. Enabled by
-default, use -noautorotate to disable it.
-
-
--framedrop
-Drop video frames if video is out of sync. Enabled by default if the master
-clock is not set to video. Use this option to enable frame dropping for all
-master clock sources, use -noframedrop to disable it.
-
-
--infbuf
-Do not limit the input buffer size, read as much data as possible from the
-input as soon as possible. Enabled by default for realtime streams, where data
-may be dropped if not read in time. Use this option to enable infinite buffers
-for all inputs, use -noinfbuf to disable it.
-
-
-
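-As an illustrative combination (the file name is assumed): use the external
-clock as master, drop late frames, and exit once playback finishes:
-
ffplay -sync ext -framedrop -autoexit input.mkv
-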
-
-
-
3.6 While playing# TOC
-
-
-q, ESC
-Quit.
-
-
-f
-Toggle full screen.
-
-
-p, SPC
-Pause.
-
-
-a
-Cycle audio channel in the current program.
-
-
-v
-Cycle video channel.
-
-
-t
-Cycle subtitle channel in the current program.
-
-
-c
-Cycle program.
-
-
-w
-Cycle video filters or show modes.
-
-
-s
-Step to the next frame.
-
-Pause if the stream is not already paused, step to the next video
-frame, and pause.
-
-
-left/right
-Seek backward/forward 10 seconds.
-
-
-down/up
-Seek backward/forward 1 minute.
-
-
-page down/page up
-Seek to the previous/next chapter, or if there are no chapters,
-seek backward/forward 10 minutes.
-
-
-mouse click
-Seek to percentage in file corresponding to fraction of width.
-
-
-
-
-
-
-
4 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
4.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- '
and \
are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between two quotes ('') are included literally in the
-parsed string. The quote character ' itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
4.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour containing the '
-special character:
-
Crime d\'Amour
-
- The string above contains a quote, so the ' needs to be escaped
-when quoting it:
-
'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \ you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
4.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
4.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
-
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
-
-
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
4.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
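-Either form can be used wherever a duration is expected, for example (file
-names are arbitrary):
-
ffmpeg -ss 00:01:23.500 -i input.mkv -t 45 -c copy clip.mkv
-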
-
-
4.4 Video size# TOC
-
Specify the size of the sourced video, it may be a string of the form
-width xheight , or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
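-For example (the file name and pixel format are assumptions about the raw
-data), an abbreviation can stand in for an explicit WxH when playing
-headerless video:
-
ffplay -f rawvideo -pixel_format yuv420p -video_size hd720 raw.yuv
-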
-
-
4.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
-
-
-
4.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
4.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed by "0x" followed by an
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
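-Any of the accepted forms can be used wherever a color is expected; as a small
-sketch with the lavfi color source (the size is arbitrary), the name and the
-hexadecimal value below are equivalent:
-
ffplay -f lavfi -i "color=c=DarkRed:size=qvga"
-ffplay -f lavfi -i "color=c=0x8B0000:size=qvga"
-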
-
-
4.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1(back) ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout
)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_*
macros in libavutil/channel_layout.h .
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
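-As a quick sketch with the lavfi anullsrc source (the sample rate is chosen
-arbitrarily), the same layout can be given by name or as a list of channel
-names:
-
ffplay -f lavfi -i "anullsrc=channel_layout=5.1:sample_rate=48000"
-ffplay -f lavfi -i "anullsrc=channel_layout=FL+FR+FC+LFE+BL+BR:sample_rate=48000"
-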
-
-
5 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +
, -
,
-*
, /
, ^
.
-
-
The following unary operators are available: +
, -
.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and lesser than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is lesser than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is lesser than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated to y ; it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note: variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
For example the construct:
-
if (A AND B) then C
-
is equivalent to:
-
if(gt(A,0)*gt(B,0), C)
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
-
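-In practice these prefixes mostly appear in option values; as a sketch (the
-rates are chosen arbitrarily), the ’M’ and ’k’ postfixes on bitrate options:
-
ffmpeg -i input.mkv -b:v 2.5M -maxrate 3M -bufsize 6M -b:a 192k out.mkv
-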
-
-
-
6 OpenCL Options# TOC
-
-
When FFmpeg is configured with --enable-opencl
, it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-
-
-
-
7 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
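-For instance (all values below are purely illustrative), several of the generic
-options can be combined on one encode, with stream specifiers where needed:
-
ffmpeg -i input.mkv -c:v mpeg4 -b:v 1M -g 250 -bf 2 -c:a mp2 -b:a 192k -ac 2 out.avi
-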
The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vector by macroblock (mpeg4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate
and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be included between -1 and
-69, default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be included between -1 and
-1024, default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not automatically detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in broken Microsoft decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, more strict version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk, do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non compliancies as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use elsewise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
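-As a sketch (the input name is an assumption), printing per-block QP
-information while decoding, discarding the output:
-
ffmpeg -debug qp -i input.mp4 -f null -
-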
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
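-As an illustrative sketch (file names and quality settings are assumptions),
-the compare functions above are usually tuned together with the macroblock
-decision option mbd documented further below, for example with the
-native mpeg4 encoder:
-
ffmpeg -i input.avi -c:v mpeg4 -mbd rd -cmp satd -subcmp satd -q:v 3 output.avi
-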
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Limit the motion vector range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non spec compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
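-As a sketch of how ‘export_mvs ’ is typically consumed (the file names are
-assumptions), the exported side data can be visualized with the codecview
-filter mentioned earlier for vismv :
-
ffmpeg -flags2 +export_mvs -i input.mp4 -vf codecview=mv=pf+bf+bb output.mp4
-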
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identically to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames except keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
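-As an illustrative sketch (file names are assumptions), decoding only
-keyframes with ‘nokey ’ is a common way to extract thumbnails quickly:
-
ffmpeg -skip_frame nokey -i input.mp4 -vsync 0 keyframe_%03d.png
-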
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non-drop-frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none
.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
8 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders
and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER
/
---disable-decoder=DECODER
.
-
-
The option -decoders
of the ff* tools will display the list of
-enabled decoders.
-
-
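-A minimal sketch of that workflow (the decoder names are only examples):
-
./configure --list-decoders
./configure --disable-decoders --enable-decoder=h264 --enable-decoder=aac
ffmpeg -decoders
-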
-
-
9 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
9.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
9.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
-
-
-
-
-
10 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
10.1 ac3# TOC
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
10.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
-
-
-
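-As an illustrative sketch (the file names are assumptions), dynamic range
-compression can be disabled entirely by passing the decoder option before
-the input:
-
ffmpeg -drc_scale 0 -i input.ac3 output.wav
-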
-
-
10.2 ffwavesynth# TOC
-
-
-Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
10.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt
.
-
-
-
10.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm
.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
10.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc
.
-
-
-
10.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
10.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb
.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
10.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb
.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
10.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
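-Since a native decoder exists, the wrapper has to be requested explicitly.
-A minimal sketch (file names are assumptions):
-
ffmpeg -c:a libopus -i input.opus output.wav
-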
-
-
11 Subtitles Decoders# TOC
-
-
-
11.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
11.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0
.
-
-
-
-
-
11.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi
.
-
-
-
11.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text. You should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext-based subtitles if
-your application can handle simple text-based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext-based subtitles where empty spaces may
-be present at the start or at the end of the lines, or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
-
-
-
12 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs
.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs
, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF
, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF
.
-
-
The option -bsfs
of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
12.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
-
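-A minimal sketch of the MOV/MP4 case described above (file names are
-assumptions), copying an ADTS AAC stream out of an MPEG-TS file:
-
ffmpeg -i input.ts -c copy -bsf:a aac_adtstoasc output.mp4
-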
-
12.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
12.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
-If not specified, it is assumed to be ‘k ’.
-
-
For example the following ffmpeg
command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264
encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
12.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg
, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
12.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
12.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
12.7 mjpega_dump_header# TOC
-
-
-
12.8 movsub# TOC
-
-
-
12.9 mp3_header_decompress# TOC
-
-
-
12.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeral string, whose value is related to how often output bytes will
-be modified. Therefore, values less than or equal to 0 are forbidden; the
-lower the value, the more frequently bytes are modified, with 1 meaning
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
12.11 remove_extra# TOC
-
-
-
13 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value allows more information to be
-detected in case it is dispersed into the stream, but increases
-latency. Must be an integer not less than 32. It is 5000000 by default.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-If set to 1, allow seeking to non-keyframes at the demuxer level, when supported.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value allows more accurate information to be detected, but will
-increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
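-When probing with the defaults misses streams or codec parameters, both
-options can be raised together; a sketch (the values and file names are
-only examples):
-
ffmpeg -analyzeduration 10000000 -probesize 50000000 -i input.ts -c copy output.mkv
-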
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non compliancies as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitles desynching and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
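-A sketch of typical use (file names are assumptions), shifting timestamps so
-that the output starts at zero when stream-copying:
-
ffmpeg -i input.mkv -c copy -avoid_negative_ts make_zero output.mp4
-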
-
-skip_initial_bytes integer (input )
-Set number of bytes to skip before reading header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
13.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
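-As an illustrative sketch (the input name is an assumption), a stream
-specifier can be used with ffprobe to inspect only the first video stream:
-
ffprobe -show_streams -select_streams v:0 input.mkv
-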
-
-
14 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
14.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
-
14.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers except the PNG signature, up to (but not including) the first
-fcTL chunk, are transmitted as extradata.
-Frames are then split as all the chunks between two fcTL chunks, or
-between the last fcTL and the IEND chunk.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
-
-
14.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
14.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
14.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was set to its default -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order-mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the string with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
-
-
14.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
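-A minimal sketch of a script and its use (file names and the duration are
-only examples):
-
ffconcat version 1.0
file intro.mp4
duration 5.0
file main.mp4
-
-Assuming the script is saved as playlist.ffconcat , it can be stream-copied
-with:
-
ffmpeg -f concat -i playlist.ffconcat -c copy output.mp4
-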
-
-
14.5 flv# TOC
-
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
14.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-tracks meta data entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
-
-
14.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
14.8 gif# TOC
-
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
14.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%d0N d" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern must not necessarily contain "%d" or
-"%0N d", for example to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not provided: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
14.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
14.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
-
-
-
14.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
14.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beat sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
-An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
14.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
15 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
-An ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
16 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "–list-protocols".
-
-
You can disable all the protocols using the configure option
-"–disable-protocols", and selectively enable a protocol using the
-option "–enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"–disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
16.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
16.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
-Cache the input stream to a temporary file. It brings seeking capability to live streams.
-
-
-
-
-
16.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
-Allows reading and seeking from many resources in sequence as if they were
-a single resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
-where URL1 , URL2 , ..., URLN are the URLs of the
-resources to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
16.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
-
-
16.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
16.6 file# TOC
-
-
File access protocol.
-
-
-Allows reading from or writing to a file.
-
-
A file URL can have the form:
-
file:filename
-
where filename is the path of the file to read.
-
-
-A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow media.
-
-
-
-
-
16.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
-Allows reading from or writing to remote resources using the FTP protocol.
-
-
-The following syntax is required:
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
-NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
-
-
16.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
16.9 hls# TOC
-
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
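-
-For example, assuming a placeholder host and path, the recommended way to play such a stream is to pass the playlist URL directly, so that the hls demuxer is used:
-
-ffplay http://host/path/to/remote/resource.m3u8
-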
-
16.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled at regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
-
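-
-For example, to read an HTTP resource while overriding the User-Agent header and disabling seeking (the server, path and option values are purely illustrative):
-
-ffplay -user_agent "MyPlayer/1.0" -seekable 0 http://server.example.com/path/stream.mp4
-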
-
16.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
16.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, which do not support the
-HTTP PUT method and use the SOURCE method instead.
-
-
-
-
-
-
icecast://[username[:password]@]server:port/mountpoint
-
-
-
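-
-For example, a sketch of streaming Ogg/Vorbis to a mountpoint, assuming placeholder server, port, password and mountpoint, and a build with libvorbis enabled (content_type is set because the stream is not audio/mpeg):
-
-ffmpeg -re -i input.wav -c:a libvorbis -f ogg -content_type application/ogg icecast://source:hackme@icecast.example.com:8000/stream.ogg
-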
-
16.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
16.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server[:port][/app][/playpath]
-
-
-
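-
-For example, to play a stream with ffplay (server and path are placeholders):
-
-ffplay mmsh://server.example.com/some/path
-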
-
16.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
16.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
pipe:[number]
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example to read from stdin with ffmpeg:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
16.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username:password@]server[:port][/app][/instance][/playpath]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any
, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live
and
-recorded
.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example to read with ffplay
a multimedia resource named
-"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
16.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
16.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
16.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
16.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
16.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
16.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
-
For more information see: http://www.samba.org/ .
-
-
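-
-For example, to play a file from an SMB/CIFS share with ffplay (domain, credentials, server, share and path are all placeholders):
-
-ffplay smb://domain:user:password@server/share/path/to/file.mpeg
-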
-
16.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
Allows reading from or writing to remote resources using the SFTP protocol.
-
-
The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing the private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
16.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"–enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto ://server [:port ][/app ][/playpath ] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using
-ffmpeg
:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay
:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
16.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname [:port ][?option =val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
-
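-
-For example, a sketch of sending a single video stream in real time over RTP (the RTP muxer carries one stream per output, hence -an; the host, ports and the choice of MPEG-4 video are illustrative):
-
-ffmpeg -re -i input -an -c:v mpeg4 -f rtp rtp://remote.example.com:5004?rtcpport=5005
-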
-
16.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat; it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for an RTSP URL is:
-
-
rtsp://hostname [:port ]/path
-
-
-
Options can be set on the ffmpeg
/ffplay
command
-line, or set in code via AVOption
s or in
-avformat_open_input
.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay
, the
-streams to display can be chosen with -vst
n and
--ast
n for video and audio respectively, and can be switched
-on the fly by pressing v
and a
.
-
-
-
16.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay
and
-ffmpeg
tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
16.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat, it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
16.27.1 Muxer# TOC
-
-
The syntax for a SAP url given to the muxer is:
-
-
sap://destination [:port ][?options ]
-
-
-
The RTP packets are sent to destination on port port ,
-or to port 5004 if no port is specified.
-options is a &
-separated list. The following options
-are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay
:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay
, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
16.27.2 Demuxer# TOC
-
-
The syntax for a SAP url given to the demuxer is:
-
-
sap://[address ][:port ]
-
-
-
address is the multicast address to listen for announcements on,
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port that is listened on, 9875 if omitted.
-
-
The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
ffplay sap://
-
To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
16.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host :port [?options ]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. Outgoing connection is done by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
-
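-
-For example, a sketch of a point-to-point MPEG-TS transfer over SCTP, assuming placeholder host and port and an FFmpeg build with SCTP support:
-
-# On the receiving host, wait for an incoming connection and play the stream.
-ffplay sctp://hostname:port?listen
-# On the sending host, connect and send.
-ffmpeg -re -i input -f mpegts sctp://hostname:port
-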
-
16.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
-
-
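-
-A minimal sending sketch, assuming a placeholder host and port, and with BASE64_KEY_AND_SALT standing in for the base64-encoded 30-byte block (16-byte master key followed by the 14-byte master salt) described above:
-
-ffmpeg -re -i input -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params BASE64_KEY_AND_SALT srtp://hostname:port
-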
-
16.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
16.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP url is:
-
-
tcp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to set up a listening TCP connection
-with ffmpeg
, which is then accessed with ffplay
:
-
-
ffmpeg -i input -f format tcp://hostname :port ?listen
-ffplay tcp://hostname :port
-
-
-
-
16.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL url is:
-
-
tls://hostname :port [?options ]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay
:
-
-
-
ffplay tls://hostname :port
-
-
-
-
16.33 udp# TOC
-
-
User Datagram Protocol.
-
-
The required syntax for a UDP URL is:
-
-
udp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
16.33.1 Examples# TOC
-
-
- Use ffmpeg
to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname :port
-
-
- Use ffmpeg
to stream in mpegts format over UDP using 188
-sized UDP packets, using a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg
to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address ]:port ...
-
-
-
-
-
16.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
unix://filepath
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
-
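-
-For example, to write an MPEG-TS stream into a local socket that another process has already created and is listening on (the socket path is a placeholder):
-
-ffmpeg -re -i input -f mpegts unix:///tmp/ffmpeg.sock
-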
-
-
17 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
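-
-For example, a device private option is passed on the command line in the same way as a format option; here the sample_rate and channels options of the alsa input device are used for illustration (device availability depends on the build and platform):
-
-ffmpeg -f alsa -sample_rate 44100 -channels 2 -i hw:0 out.wav
-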
-
-
-
18 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow you to access
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-
You can disable all the input devices using the configure option
-"--disable-indevs", and selectively enable an input device using the
-option "--enable-indev=INDEV", or you can disable a particular
-input device using the option "--disable-indev=INDEV".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
18.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD [,DEV [,SUBDEV ]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg
from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
18.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the currently recommended framework by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the second selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-
- -video_device_index <INDEX>
-
-and/or
-
- -audio_device_index <INDEX>
-
-, overriding any
-device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
18.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
18.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
18.3 bktr# TOC
-
-
BSD video input device.
-
-
-
18.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronization between them.
-
-
The input name should be in the format:
-
TYPE=NAME[:TYPE=NAME]
-
-
where TYPE can be either audio or video ,
-and NAME is the device’s name.
-
-
-
18.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
18.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
-
-
-
-
18.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
18.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
18.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
desktop
-
or
-
title=window_title
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
18.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0
to
-not draw the pointer. Default value is 1
.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
18.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
18.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
18.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
-
-
-
-
-
-
18.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg
.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
18.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
18.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
18.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
18.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
18.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
18.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
18.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
18.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string ” as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
18.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg
use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
18.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default"
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
18.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the samplerate in Hz, by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
18.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
-
-
-
18.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
18.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg
use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
18.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node, usually Linux
-systems tend to automatically create such nodes when the device
-(e.g. an USB webcam) is plugged into the system, and has a name of the
-kind /dev/videoN , where N is a number associated to
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-width xheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
18.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Default to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
18.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
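-
-For example, to list the installed capture drivers and then record from driver 0 (the output name and frame rate are illustrative):
-
-ffmpeg -f vfwcap -i list
-ffmpeg -f vfwcap -framerate 25 -i 0 out.avi
-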
-
18.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname :display_number .screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo
program for getting basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
18.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0
specifies
-not to draw the pointer. Default value is 1
.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered
or a number of pixels PIXELS .
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of region; otherwise, the region
-follows only when the mouse pointer reaches within PIXELS (greater than
-zero) to the edge of region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer reaches within 100 pixels to edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
18.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags.
-On Windows, you need to run the IDL files through widl.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1. Audio sample rate is always 48 kHz and the number
-of channels currently is limited to 2 (stereo).
-
-
-
18.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
18.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
19 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
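For example, a minimal sketch that converts a file to 48 kHz stereo with the SoX
-resampler at higher precision, or gently syncs audio to its timestamps (any of the
-options above can be combined the same way):
-
-
ffmpeg -i input.wav -af aresample=osr=48000:ocl=stereo:resampler=soxr:precision=28 output.wav
ffmpeg -i input.mkv -af aresample=async=1000 -c:v copy output.mkv
-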
-
-
-
20 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific of
-some scaling algorithms and ignored by others. The specified values
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither based on addition
-
-
-‘x_dither ’
-arithmetic dither based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
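For example, a minimal sketch that downscales with the Lanczos algorithm and accurate
-rounding; the same flags can also be given per filter instance as
-scale=1280:720:flags=lanczos+accurate_rnd:
-
-
ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd output.mp4
-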
-
-
-
-
21 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half part of the video, and then vertically flipped. The
-overlay filter takes as input the first unchanged output of the
-split filter (which was labelled as [main] ), and overlays on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take a list of parameters as input: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
22 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
tools/graph2dot -h
-
to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
23 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
23.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance of, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments ".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value . In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade
filter
-declares three options in this order – type , start_frame and
-nb_frames . Then the parameter list in:0:30 means that the value
-in is assigned to the option type , 0 to
-start_frame and 30 to nb_frames .
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct values must precede the key=value pairs, and
-follow the same order constraints as in the previous point. The following
-key=value pairs can be set in any preferred order (see the examples after
-this list).
-
-
-
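For instance, the three argument forms described above are all equivalent for the
-fade example (a sketch using the option names declared by the fade filter):
-
-
fade=in:0:30
fade=type=in:start_frame=0:nb_frames=30
fade=in:0:nb_frames=30
-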
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
23.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to separate values, or one of the escaping characters \'.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \' or the special
-characters [],; used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the ' special escaping character, and the
-: special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \' escaping special characters,
-also , needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\ is special and needs to be escaped with another \, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
24 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
25 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
25.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channel are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If the number of given delays is
-smaller than the number of channels, all remaining channels will not be delayed.
-
-
-
-
-
25.1.1 Examples# TOC
-
-
- Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
adelay=1500|0|500
-
-
-
25.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
25.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
aecho=0.8:0.88:60:0.4
-
- If delay is very short, then it sounds like a (metallic) robot playing music:
-
aecho=0.8:0.88:6:0.4
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
25.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
25.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
aeval=val(0)|-val(1)
-
-
-
25.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in for fade-in, or
-out for a fade-out effect. Default is in.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
25.4.1 Examples# TOC
-
-
- Fade in first 15 seconds of audio:
-
afade=t=in:ss=0:d=15
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
25.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
25.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
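For example, a minimal sketch applying an all-pass filter centered at 700 Hz with a
-100 Hz band-width:
-
-
allpass=f=700:width_type=h:width=100
-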
-
-
-
25.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
On the other hand, if both input are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate, and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
25.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
25.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
25.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
25.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
This can be used together with ffmpeg
-shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
25.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
apad=pad_len=1024
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
apad=whole_len=10000
- Use ffmpeg to pad the audio input with silence, so that the
-video stream will always be the shortest and will be converted
-until the end in the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
25.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
A phaser filter creates series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
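For example, a minimal sketch that spells out the defaults but switches to triangular
-modulation:
-
-
aphaser=in_gain=0.4:out_gain=0.74:delay=3:decay=0.4:speed=0.5:type=t
-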
-
-
-
25.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
25.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
aresample=44100
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
aresample=async=1000
-
-
-
25.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel.
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
25.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
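For example, a minimal sketch that retags the stream as 48 kHz, speeding it up and
-raising its pitch if the input was 44.1 kHz:
-
-
asetrate=r=48000
-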
-
-
-
25.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
-
25.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05
(50 milliseconds). Allowed range is [0.1 - 10]
.
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
-
-
25.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2
, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
25.17.1 Examples# TOC
-
-
Stress-test amerge
by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
25.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
25.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
25.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
atempo=0.8
- To speed up audio to 125% tempo:
-
atempo=1.25
-
-
-
25.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
25.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
25.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
25.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
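For example, a minimal sketch boosting the low end by 6 dB below roughly 120 Hz
-(beware of clipping with positive gains):
-
-
bass=g=6:f=120
-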
-
-
-
25.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
-
-
25.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
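For example, a minimal sketch applying the Chu Moy preset, or the equivalent explicit
-settings:
-
-
bs2b=profile=cmoy
bs2b=fcut=700:feed=60
-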
-
-
-
-
25.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel -out_channel
or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
25.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
25.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|....
or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0 is assumed but
-may be overridden (by 0/out-dBn). Typical values for the transfer
-function are -70/-70|-60/-20.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
25.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
25.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
25.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
25.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
25.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
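For example, a minimal sketch of a fairly pronounced flanging effect, with all values
-inside the documented ranges:
-
-
flanger=delay=10:depth=5:regen=20:speed=2
-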
-
-
-
25.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
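For example, a minimal sketch removing rumble below about 200 Hz with the default
-two-pole filter:
-
-
highpass=f=200
-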
-
-
-
25.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
25.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa
.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH
is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH
, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
-/usr/lib/ladspa/ .
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help
, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
25.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
ladspa=file=amp
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF
library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome
from the
-C* Audio Plugin Suite
(CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
25.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and the prior one is kept.
-
-
-
-
-
25.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
-
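For example, assuming a placeholder input file input.wav, a 3 kHz
-double-pole low-pass could be applied with a command along these lines:
-
ffmpeg -i input.wav -af lowpass=f=3000:p=2 output.wav
-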
-
25.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
25.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
Note that ffmpeg
integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
25.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- there is only one input per output channel.
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
pan="stereo| c1=c1"
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
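As an illustrative complete command (the file names are placeholders), the
-5.1-to-stereo remapping shown above could be applied with:
-
ffmpeg -i INPUT -af "pan=stereo| c0=FL | c1=FR" OUTPUT
-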
-
-
25.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At the end of filtering it displays track_gain and track_peak.
-
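As an illustrative invocation (the file name is a placeholder), a file can be
-scanned without writing any output by sending the result to the null muxer:
-
ffmpeg -i track.flac -af replaygain -f null -
-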
-
-
25.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
25.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-than or equal to a noise tolerance value for a duration greater than or equal
-to the minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
25.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg
to detect silence with 0.0001 noise
-tolerance in silence.mp3 :
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
25.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at the beginning of
-the audio. A value of zero indicates that no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from the beginning of audio,
-start_periods will be 1 but it can be increased to higher
-values to trim all audio up to a specific count of non-silence periods.
-Default value is 0.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0
.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0
may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0
.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0
.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0
.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0
.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-This is useful if you want to remove long pauses between words but do not want
-to remove the pauses completely. Default value is 0.
-
-
-
-
-
-
25.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
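The same example, written as a complete command with named options (the file
-names are placeholders):
-
ffmpeg -i input.wav -af silenceremove=start_periods=1:start_duration=5:start_threshold=0.02 output.wav
-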
-
-
-
-
25.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000
Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
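For example, assuming a placeholder input file input.wav, a gentle 3 dB
-treble boost could be applied with:
-
ffmpeg -i input.wav -af treble=g=3 output.wav
-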
-
25.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
25.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
25.42.2 Examples# TOC
-
-
-
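A couple of illustrative invocations (chosen here as typical uses):
-
volume=volume=0.5
-volume=volume=6dB:precision=fixed
-
-
The first halves the input volume; the second raises it by 6 dB using
-fixed-point precision.
-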
-
-
25.43 volumedetect# TOC
-
-
Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the input stream end is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
25.43.1 Examples# TOC
-
-
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
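The statistics above can be produced with a command of this form (the file
-name is a placeholder); the null muxer discards the output:
-
ffmpeg -i input.mp3 -af volumedetect -f null -
-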
-
-
-
26 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
26.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator/denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
26.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
26.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
26.2.1 Examples# TOC
-
-
- Generate silence:
-
-
aevalsrc=0
-
- Generate a sine signal with a frequency of 440 Hz, set the sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
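As an illustrative complete command (the file name is a placeholder), five
-seconds of the 440 Hz tone can be written to a file with:
-
ffmpeg -f lavfi -i "aevalsrc=sin(440*2*PI*t):s=8000:d=5" output.wav
-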
-
-
-
-
26.3 anullsrc# TOC
-
-
The null audio source; it returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
26.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
-
anullsrc=r=48000:cl=4
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
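For example, five seconds of silence can be generated into a placeholder
-output file with:
-
ffmpeg -f lavfi -i anullsrc=r=48000:cl=mono -t 5 silence.wav
-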
-
-
26.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal
. See also the list_voices option.
-
-
-
-
-
26.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt
voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite
and
-the lavfi
device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
26.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
26.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
-
sine
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
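As an illustrative complete command (the file name is a placeholder), a five
-second 1 kHz test tone can be generated with:
-
ffmpeg -f lavfi -i sine=frequency=1000:duration=5 tone.wav
-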
-
-
-
-
27 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
27.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of the filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
27.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
28 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
28.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
28.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
28.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-SubStation Alpha) subtitle files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto
.
-
-
-
-
-
28.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16
.
-
-
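As an illustrative invocation (the file name is a placeholder), the bounding
-box can be printed to the log without writing any output:
-
ffmpeg -i input.mp4 -vf bbox=min_val=16 -f null -
-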
-
-
-
28.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
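The same filter can be run as a complete command (the file name is a
-placeholder), discarding the output with the null muxer:
-
ffmpeg -i input.mp4 -vf blackdetect=d=2:pix_th=0.00 -an -f null -
-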
-
-
-
28.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98
.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32
.
-
-
-
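As an illustrative invocation (the file name is a placeholder), using the
-default thresholds explicitly:
-
ffmpeg -i input.mp4 -vf blackframe=amount=98:threshold=32 -f null -
-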
-
-
-
28.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
The blend
filter takes two input streams and outputs one
-stream, the first input is the "top" layer and second input is
-"bottom" layer. Output terminates when shortest input terminates.
-
-
The tblend
(time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode . Default value is normal
.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
28.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
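As an illustrative complete command (file names are placeholders, and both
-inputs are assumed to share the same resolution and pixel format):
-
ffmpeg -i top.mp4 -i bottom.mp4 -filter_complex blend=all_mode=multiply output.mp4
-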
-
-
-
28.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2
for the
-luma and alpha planes, and of min(cw,ch)/2
for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
28.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
28.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
28.9.1 Examples# TOC
-
-
- Visualize multi-directional MVs from P- and B-frames using ffplay
:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
28.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
28.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
-
colorbalance=rs=.3
-
-
28.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
28.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
28.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
-red = red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
28.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
28.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
28.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
28.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1, it will force the output display aspect ratio
-to be the same as that of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
28.15.1 Examples# TOC
-
-
-
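A few illustrative invocations (chosen here as typical uses):
-
crop=100:100:12:34
-crop=in_w/2:in_h/2
-crop=in_w-100:in_h-100
-
-
The first crops a 100x100 area with its top-left corner at (12,34); the
-second keeps the centered half-width, half-height area; the third trims
-50 pixels from every edge.
-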
-
-
28.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
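A typical workflow (the file name is a placeholder) is to run the detection
-pass discarding the output, then reuse the crop=... values it reports with
-the crop filter:
-
ffmpeg -i input.mp4 -vf cropdetect=limit=24:round=2 -f null -
-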
-
-
-
28.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-the vector spaces, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
To avoid some filtergraph syntax conflicts, each key point list needs to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ...
-
-
-
28.17.1 Examples# TOC
-
-
-
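Two illustrative invocations (chosen here as typical uses):
-
curves=preset=vintage
-curves=blue='0/0 0.5/0.58 1/1'
-
-
The first applies a built-in preset; the second slightly lifts the blue
-mid-tones using the key point syntax described above.
-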
-
-
28.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing the whole input width or
-height, a warning will be displayed and the affected borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
28.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5:
-
-
dctdnoiz=sigma=4.5
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
Violent denoise using a block size of 16x16:
-
-
-
-
28.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
-
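As an illustrative invocation (the file name is a placeholder), dropping one
-duplicated frame out of every five:
-
ffmpeg -i input.mp4 -vf decimate=cycle=5 output.mp4
-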
-
-
28.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
-Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content then the output of pullup,dejudder
-will have a variable frame rate. It may change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
-
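As an illustrative invocation (the file name is a placeholder), combining it
-with pullup as described above:
-
ffmpeg -i input.mp4 -vf pullup,dejudder output.mp4
-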
-
-
28.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
28.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
28.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area where to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
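As an illustrative invocation (the file name is a placeholder), allowing a
-larger search range and mirrored edges:
-
ffmpeg -i input.mp4 -vf deshake=rx=32:ry=32:edge=mirror output.mp4
-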
-
-
-
28.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w , h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
28.23.1 Examples# TOC
-
-
-
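An illustrative invocation (the values are chosen here only as an example):
-
drawbox=x=10:y=10:w=200:h=60:color=red@0.5:t=4
-
-
This draws a half-opaque red box of 200x60 pixels with a 4-pixel edge,
-10 pixels from the top-left corner.
-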
-
-
28.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness, so the image gets
-framed. They default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w , h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
28.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
28.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi.
-
-
-
28.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of the input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer to
-each other, so you can for example specify y=x/dar .
-
-
-
-
-
28.25.2 Text expansion# TOC
-
-
If expansion is set to strftime ,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none , the text is printed verbatim.
-
-
If expansion is set to normal (which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequences of the form %{...} are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
28.25.3 Examples# TOC
-
-
-
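-A rough sketch of the expansion mechanism in use (the input name and the font
-path are placeholders; your build may resolve the font via fontconfig instead):
-
-
ffmpeg -i input.mp4 -vf "drawtext=fontfile=/path/to/font.ttf:x=10:y=10:text='%{n} %{pts\:hms}'" output.mp4
-
-
-This draws the current frame number and a formatted timestamp at a fixed offset
-from the top-left corner.
-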
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
28.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high .
-
-Default value for low is 20/255 , and default value for high is 50/255 .
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
28.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
28.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select r , g , b planes
-together with y , u , v planes at the same time.
-
-
-
-
-
28.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
28.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
-
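-For example, a strong posterize using a small codebook (the 8-color codebook
-length and the 3 iterations below are only illustrative values):
-
-
elbg=l=8:n=3
-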
-
-
28.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in .
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
28.29.1 Examples# TOC
-
-
-
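- A couple of minimal sketches (the frame and time values are arbitrary): fade in
-over the first 30 frames, and fade out to black over 5 seconds starting at 25
-seconds:
-
-
fade=in:0:30
-fade=t=out:st=25:d=5
-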
-
-
28.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top field (if the value is 0 or top )
-or the bottom field (if the value is 1 or bottom ).
-
-
-
-
-
28.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine, fieldmatch needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a lighter clone of TFM, on
-which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch and decimate if your input has mixed
-telecined and progressive content with a changing frame rate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parentheses at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0 could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1 .
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 default to 0 .
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0] range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0] .
-
-Default value is 12.0 .
-
-
-combmatch
-When combmatch is not none , fieldmatch will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1 (every pixel will be detected as combed) to 255 (no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12] .
-
-Default value is 9 .
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma-only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0 and maximum is blocky x blockx (at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80 .
-
-
-
-
-
28.31.1 p/c/n/u/b meaning# TOC
-
-
-
28.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch
is configured to run a matching from bottom
-(field =bottom ), this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
28.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, an
-’x’ is placed above and below each matched field.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
28.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
28.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
28.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
28.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
28.34.1 Examples# TOC
-
-
-
-
-
28.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
28.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
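fps=fps=25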
-
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
28.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
28.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select one frame after every step frames.
-Allowed values are positive integers higher than 0. Default value is 1 .
-
-
-
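-For example, to keep one frame out of every five (an arbitrary step value):
-
-
framestep=step=5
-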
-
-
28.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH .
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive, or a color description as specified in the "Color"
-section of the ffmpeg-utils manual), a position (specified as X /Y , where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
28.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
28.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0 (PSNR optimal).
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1 . Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0 (not enabled).
-
-
-
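-A possible invocation, forcing a constant quantizer (the values below are only
-illustrative):
-
-
fspp=quality=5:qp=4
-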
-
-
-
28.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified, it will evaluate to the opaque value.
-If neither of the chrominance expressions is specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and
-0.5,0.5 for the chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
28.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
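geq=p(W-X\,Y)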
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
28.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
28.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and a radius of 8 :
-
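gradfun=3.5:8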
-
- Specify the radius, omitting the strength (which will fall back to the default
-value):
-
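gradfun=radius=8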
-
-
-
-
-
28.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0 .
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0 disables the filter after the last frame of the CLUT is reached.
-Default is 1 .
-
-
-
-
haldclut
also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
28.42.1 Workflow examples# TOC
-
-
-
28.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut
to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (the duration of
-clut.nut ); then the last picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot stream.
-
-
-
28.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a square image of Level*Level*Level by
-Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut
filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
28.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
28.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none
, weak
or
-strong
. It defaults to none
.
-
-
-
-
-
28.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar as color
but actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform
mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10
. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform
. Can be either row
, or column
.
-Default is row
.
-
-
-waveform_mirror
-Set mirroring mode for waveform
. 0
means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row
mode and at the top for column
mode. Default is
-0
(unmirrored).
-
-
-display_mode
-Set display mode for waveform
and levels
.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels
. Can be either linear
, or logarithmic
.
-Default is linear
.
-
-
-
-
-
28.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
28.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
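-For example, spelling out the default strengths explicitly (the numbers simply
-restate the defaults described above):
-
-
hqdn3d=4.0:3.0:6.0:4.5
-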
-
-
28.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2
for hq2x
, 3
for
-hq3x
and 4
for hq4x
.
-Default is 3
.
-
-
-
-
-
28.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
28.48.1 Examples# TOC
-
-
-
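- Rotate the hue by 90 degrees while leaving the saturation untouched (the values
-are arbitrary):
-
-
hue=h=90:s=1
-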
-
-
28.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
28.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect if the input frames are interlaced, progressive, or
-top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-whether the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate, it will be used without any further
-computations; if it is found to be inaccurate, it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-method to clean up the interlaced flag
-
-
-
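-A common way to run the detector over part of a file without writing any output
-media (the input name and the frame count are placeholders):
-
-
ffmpeg -i input.mkv -vf idet -frames:v 500 -an -f null -
-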
-
-
28.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced image fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so-called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none
.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
-
-
-
-
-
28.51 interlace# TOC
-
-
Simple interlacing filter from progressive content. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
-
-
28.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on the interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
28.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
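kerndeint=sharp=1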
-
- Paint processed pixels in white:
-
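kerndeint=map=1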
-
-
-
-
28.53 lenscorrection# TOC
-
-
-Correct radial lens distortion.
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide-angle lenses, and thereby re-rectify the image. To find the right parameters,
-one can use tools available, for example, as part of opencv, or simply use trial and error.
-To use opencv, use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
28.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
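-A sketch of a correction centered on the frame (the k1 and k2 values are
-placeholders to be determined by calibration or trial and error):
-
-
lenscorrection=cx=0.5:cy=0.5:k1=-0.227:k2=-0.022
-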
-
-
28.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
-
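-For example, to apply a LUT from an Iridas cube file with trilinear
-interpolation (the file name is a placeholder):
-
-
lut3d=file=grade.cube:interp=trilinear
-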
-
-
28.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
28.55.1 Examples# TOC
-
-
-
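- Two minimal sketches: negate the luma plane of a YUV input, and neutralize the
-chroma planes (128 being the mid-point for 8-bit chroma):
-
-
lutyuv=y=negval
-lutyuv="u=128:v=128"
-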
-
-
28.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merges selected input
-planes into the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0
.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mappings is
-similar, ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p
.
-
-
-
-
-
28.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
28.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
-
-
28.58 mp# TOC
-
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-as for the corresponding MPlayer filters. For detailed instructions, check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
28.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
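mp=eq2=1.0:2:0.5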
-
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
28.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previous sequentially dropped frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
-
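-A typical pairing drops near-duplicate frames and then regenerates timestamps;
-the setpts step is a common companion, not part of mpdecimate itself:
-
-
mpdecimate,setpts=N/FRAME_RATE/TB
-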
-
-
28.60 negate# TOC
-
-
Negate input video.
-
-
-It accepts an integer as input; if non-zero, it also negates the
-alpha component (if available). The default input value is 0.
-
-
-
28.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
28.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
28.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457
.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for a specific pixel component or all pixel components in case of
-all_strength . Default value is 0 . Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags, or set flags for all components if all_flags is used.
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
28.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
28.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
28.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv .
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
28.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate .
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file are assumed instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
28.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode .
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
28.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth .
-
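-
-For illustration, a Gaussian smooth with a 5x5 aperture could be written in
-the same style as the dilate examples above (a sketch, values are illustrative):
-
-ocv=filter_name=smooth:filter_params=gaussian|5|5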
-
-
28.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of the input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter so that
-they both start from a zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together multiple overlays, but you should test the
-efficiency of such an approach.
-
-
-
28.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
28.65.2 Examples# TOC
-
-
-
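-
-An illustrative sketch (input names are placeholders): overlay a logo 10
-pixels away from the bottom-right corner of the main video, repeating its
-last frame until the main input ends:
-
-ffmpeg -i main.mp4 -i logo.png -filter_complex "overlay=x=W-w-10:y=H-h-10:eof_action=repeat" out.mp4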
-
-
28.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8 .
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0 .
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0 .
-
-
-
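-
-For illustration, a moderately strong denoise might look like this (values
-are placeholders within the documented ranges):
-
-owdenoise=depth=10:luma_strength=5:chroma_strength=3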
-
-
28.67 pad# TOC
-
-
Add padding to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
28.67.1 Examples# TOC
-
-
-
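-
-A minimal sketch: pad the frame to 640x480 (assuming the input is no larger
-than that) with the original picture centered:
-
-pad=640:480:(ow-iw)/2:(oh-ih)/2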
-
-
28.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set the coordinate expressions for the top left, top right, bottom left and
-bottom right corners.
-Default values are 0:0:W:0:0:H:W:H , with which the perspective will remain
-unchanged.
-If the sense option is set to source , then the specified points will be
-sent to the corners of the destination. If the sense option is set to
-destination , then the corners of the source will be sent to the specified
-coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
-
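-
-A hedged sketch: send the top corners of the source, 20 pixels in from each
-side, to the destination corners, correcting a slight keystone (values are
-illustrative):
-
-perspective=x0=20:y0=0:x1=W-20:y1=0:x2=0:y2=H:x3=W:y3=H:sense=source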
-
-
28.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
28.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
28.71 pp# TOC
-
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl ).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255 .
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1) filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1) filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1) filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a )
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a )
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a )
-
-
-
-
-
28.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
-
- Apply default filters without brightness/contrast correction:
-
-
- Apply default filters and temporal denoiser:
-
-
pp=default/tmpnoise|1|2|3
-
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
-
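-
-For the first, second and last items above, subfilter strings built from the
-short names documented in this section could look like these sketches:
-
-pp=hb/vb/dr/al
-pp=de/-al
-pp=hb|y/vb|a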
-
-
-
28.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp=6 with a 7-point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
-
-
-
28.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes two input videos; the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. It also assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file written when stats_file is selected contains a sequence of
-key/value pairs of the form key :value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
28.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup: use fps=24000/1001 if the input frame rate is 29.97fps, and
-fps=24 for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-the filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high-motion sequences.
-Conversely, setting it to -1 will make the filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause interlaced frames to appear in the output.
-Default value is 0 .
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use a chroma plane instead of the default luma plane
-for the filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or with grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
28.75 qp# TOC
-
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
28.75.1 Examples# TOC
-
-
- An expression like the following:
-
-
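-
-A sinusoidal remapping of the incoming QP is one possibility (an illustrative
-sketch only):
-
-qp=2+2*sin(PI*qp)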
-
-
-
28.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
-
-
28.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
28.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
-
- Rotate the input by PI/6 radians counter-clockwise:
-
-
- Rotate the input by 45 degrees clockwise:
-
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
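-
-The examples above that show no command can be written with simple angle
-expressions; in the constant-speed case, T stands for the desired period in
-seconds (sketches):
-
-rotate=PI/6
-rotate=-PI/6
-rotate=45*PI/180
-rotate=PI/3+2*PI*t/T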
-
-
-
28.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
28.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
-
28.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-as that of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
28.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used.
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 on the command line makes the output
-1280x533.
-
-Please note that this is different from specifying -1 for w
-or h ; you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar .
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
28.79.2 Examples# TOC
-
-
-
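-
-Illustrative sketches (file names are placeholders): scale to half size, and
-scale to a width of 1280 while preserving the aspect ratio with an even
-height:
-
-scale=w=iw/2:h=ih/2
-ffmpeg -i input.mp4 -vf scale=1280:-2 output.mp4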
-
-
28.80 separatefields# TOC
-
-
The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half-height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses the field-dominance information in the frame to decide which
-of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields filter.
-
-
-
28.81 setdar, setsar# TOC
-
-
The setdar filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar only), sar (setsar only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, the value "0" is assumed.
-In case the form "num :den " is used, the : character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100 .
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
28.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-
-
- To change the sample aspect ratio to 10:11, specify:
-
-setsar=sar=10/11
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
28.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder or yadif ).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
28.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType enum and of
-the av_get_picture_type_char function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
-
28.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
28.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specifies an additional form of image analysis.
-out outputs the video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outlier pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition . Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
28.85.1 Examples# TOC
-
-
-
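-
-Illustrative sketches (file name is a placeholder): print the per-frame
-statistics with ffprobe, and highlight pixels outside broadcast range in red:
-
-ffprobe -f lavfi movie=input.mov,signalstats -show_frames
-signalstats=out=brng:color=red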
-
-
28.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
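-
-For illustration, a mild blur and a light sharpen (a negative strength
-sharpens), with values inside the documented ranges:
-
-smartblur=luma_radius=1.0:luma_strength=0.6
-smartblur=lr=1.5:ls=-0.35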
-
-
28.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filters accept the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
28.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
-
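-
-Corresponding sketches for the two conversions above, using the format names
-documented in this section:
-
-stereo3d=sbsl:aybd
-stereo3d=abl:sbsr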
-
-
-
28.88 spp# TOC
-
-
Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or, in the case of quality level 6 , all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0 , the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3 .
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1 . Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0 (not enabled).
-
-
-
-
-
28.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass . This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to the ASS (Advanced
-Substation Alpha) subtitle format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
-subtitles=sub.srt
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
-subtitles=video.mkv
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
28.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
28.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
28.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-The default value is top .
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23 .
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
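-
-An illustrative command applying the classic 2:3 pulldown to 24fps
-progressive input (file names are placeholders):
-
-ffmpeg -i film_24p.mp4 -vf telecine=first_field=top:pattern=23 ntsc_telecined.mp4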
-
-
28.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100 .
-
-
-
-
Since the filter keeps track of the whole frame sequence, a bigger n
-value will result in higher memory usage, so a high value is not recommended.
-
-
-
28.93.1 Examples# TOC
-
-
- Extract one picture every 50 frames:
-
-thumbnail=50
-
- Complete example of thumbnail creation with ffmpeg :
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
28.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w xh . The default value is 0 , meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
28.94.1 Examples# TOC
-
-
-
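-
-A sketch producing 8x8 mosaics of a movie’s keyframes, scaling each picture
-down first (file names are placeholders):
-
-ffmpeg -skip_frame nokey -i input.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png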
-
-
28.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge .
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
-
-
-
28.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated, the passthrough option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
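A positional shorthand, giving dir and passthrough by order, should be
-equivalent:
-
transpose=1:portrait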
-
-
-
28.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
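- As noted above, trim does not modify timestamps; a sketch that keeps the
-second minute and shifts the output timestamps back to zero (INPUT and OUTPUT
-are placeholders):
-
ffmpeg -i INPUT -vf "trim=60:120,setpts=PTS-STARTPTS" OUTPUT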
-
-
-
-
-
28.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number, reasonable
-values lay between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number, reasonable
-values lay between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, use OpenCL capabilities; this is only available if
-FFmpeg was configured with --enable-opencl. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
28.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
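One possibility, using positional values with negative amounts for both the
-luma and chroma planes:
-
unsharp=7:7:-2:7:7:-2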
-
-
-
-
28.99 uspp# TOC
-
-
Apply ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or, in the case of quality level 8, all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is 3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
-
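A possible invocation using the options above (INPUT and OUTPUT are
-placeholders):
-
ffmpeg -i INPUT -vf uspp=quality=5 OUTPUT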
-
28.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
28.100.1 Examples# TOC
-
-
- Use default values:
-
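The bare filter name, with every option left at its default, is enough:
-
vidstabdetect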
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
-
- Analyze a video with medium shakiness using ffmpeg:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
28.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
-
28.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smoothen the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0. Default value is 0.
-
-Use also the tripod option of vidstabdetect.
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
28.101.2 Examples# TOC
-
-
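A typical two-pass sketch, pairing vidstabtransform with unsharp as
-recommended above (file names are placeholders; pass 1 with vidstabdetect is
-assumed to have already produced transforms.trf ):
-
ffmpeg -i INPUT -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 OUTPUT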
-
-
-
28.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
28.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2] range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2" and "h/2" by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1.
-
-
-
-
-
28.103.1 Expressions# TOC
-
-
The angle , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
28.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
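For instance, by lowering the lens angle:
-
vignette=PI/4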
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
28.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so-called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following values:
-
-
-‘all ’
-Deinterlace all frames.
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
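A usage sketch (file names are placeholders), deinterlacing only frames marked
-as interlaced with the complex coefficient set:
-
ffmpeg -i INPUT -vf w3fdif=filter=complex:deint=interlaced OUTPUT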
-
-
-
28.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for
-3xBR and 4 for 4xBR.
-Default is 3.
-
-
-
-
-
28.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all.
-
-
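A usage sketch with the options above (file names are placeholders), producing
-one frame per field with automatic parity detection:
-
ffmpeg -i INPUT -vf yadif=mode=send_field:parity=auto OUTPUT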
-
-
-
28.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets how many output frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of last output frame of previous input frame or 0 when there was
-not yet such frame (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-number of output frames created for previous input frame
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
28.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
29 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
29.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
29.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename , and pattern options. If such options are
-not specified an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
29.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
29.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
-
-
29.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
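For instance, a minimal graph such as
-
mptestsrc=t=dc_luma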
-
will generate a "dc_luma" test pattern.
-
-
-
29.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name of the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
29.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS /BNB ",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell to become alive (i.e. to "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bit integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, the low order bits specify
-the rule for "borning" new cells. Higher order bits encode for a
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9 specifies a stay-alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 neighbor alive
-cells, and will born a new cell if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
29.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
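Any rule in the SNS/BNB form described above will do, for example:
-
life=rule=S14/B34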
-
- Full example with slow death effect (mold) using ffplay:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
29.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
-The color source provides a uniformly colored input.
-
-
-The haldclutsrc source provides an identity Hald CLUT. See also the
-haldclut filter.
-
-
-The nullsrc source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
-The rgbtestsrc source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
-The smptebars source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
-The smptehdbars source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
-The testsrc source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc can be used. The
-following command generates noise in the luminance plane by employing
-the geq filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
-
-
29.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
-
-
30 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
30.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
30.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
31 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
31.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
The filter is used to measure the difference between channels of stereo
-audio stream. A monoaural signal, consisting of identical left and right
-signal, results in straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If the straight (or deviation from it) but horizontal line appears this
-indicates that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
-Allowed range is [0, 255]
.
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
-Allowed range is [0, 255]
.
-
-
-zoom
-Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
-
-
-
-
-
31.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
31.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
31.2.1 Examples# TOC
-
-
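- A sketch of the common case, concatenating an opening, an episode and an
-ending into a single output (file names are placeholders; each input is
-assumed to carry its video as stream 0 and its audio as stream 1):
-
ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
- '[0:0] [0:1] [1:0] [1:1] [2:0] [2:1] concat=n=3:v=1:a=1 [v] [a]' \
- -map '[v]' -map '[a]' output.mkv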
-
-
-
31.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M), Short-term loudness (S),
-Integrated loudness (I) and Loudness Range (LRA).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is 0.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9. Common values are 9 and
-18, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value in this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with "lavfi.r128.".
-
-Default is 0.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the highest sample value. It logs a message
-for sample-peak (identified by SPK).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak
-(identified by TPK) and true-peak per frame (identified by FTPK).
-This mode requires a build with libswresample.
-
-
-
-
-
-
-
-
31.3.1 Examples# TOC
-
-
- Real-time graph using ffplay
, with a EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg
:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
31.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave
works with video inputs, ainterleave
with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-
In order to submit one frame to output, these filters need to enqueue
-at least one frame for each input, so they cannot work in case one
-input is not yet terminated and will not receive incoming frames.
-
-
For example consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to output until the input sends an end-of-stream signal.
-
-
Also, depending on inputs synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
31.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg
:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
31.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
These filters are mainly aimed at developers to test direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode, must be an integer included between
-0 and UINT32_MAX. If not specified, or if explicitly set to
--1, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
-
-
31.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1, assuming that the input index starts from 0.
-
-For example a value of 1.2 corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
31.6.1 Examples# TOC
-
-
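Two sketches of typical use:
-
- Keep only I-frames (the expression comma is escaped on the command line):
-
select='eq(pict_type\,I)'
-
- Send frames with a high scene-change score to the output, using the scene
-constant described above:
-
select='gt(scene\,0.4)'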
-
-
-
31.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd must be inserted between two video filters,
-asendcmd must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
31.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
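as also given by the simplified BNF at the end of this section:
-
START [-END ] COMMANDS ;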
-
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater than or equal to START and is less than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter] is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespace, or
-sequences of characters starting with # until the end of line,
-are ignored and can be used to annotate comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
31.7.2 Examples# TOC
-
-
-
-
-
31.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts works on video frames, asetpts on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
31.8.1 Examples# TOC
-
-
- Start counting PTS from zero
-
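presumably via the STARTPTS constant:
-
setpts=PTS-STARTPTS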
-
- Apply fast motion effect:
-
-
- Apply slow motion effect:
-
-
- Set fixed rate of 25 frames per second:
-
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
-
- Apply an offset of 10 seconds to the input PTS:
-
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
-
- Generate timestamps by counting samples:
-
-
-
-
-
-
31.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
31.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
-
-
- Set the timebase to 1/10:
-
-
- Set the timebase to 1001/1000:
-
-
- Set the timebase to 2*intb:
-
-
- Set the default timebase value:
-
-
-
-
-
31.10 showcqt# TOC
-
Convert input audio to a video output representing
-frequency spectrum logarithmically (using constant Q transform with
-Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16
.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc)
.
-
-
-timeclamp
-Specify the transform timeclamp. At low frequency, there is trade-off between
-accuracy in time domain and frequency domain. If timeclamp is lower,
-event in time domain is represented more accurately (such as fast bass drum),
-otherwise event in frequency domain is represented more accurately
-(such as bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, transform is
-more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
-Default value is 1.0
.
-
-
-gamma
-Specify gamma. Lower gamma makes the spectrum more contrasted, higher gamma
-gives the spectrum a wider range. Acceptable value is [1.0, 7.0].
-Default value is 3.0.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify font color expression. This is arithmetic expression that should return
-integer value 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25
.
-
-
-count
-Specify number of transform per frame, so there are fps*count transforms
-per second. Note that audio data rate must be divisible by fps*count.
-Default value is 6
.
-
-
-
-
-
-
31.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
31.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set saturation modifier for displayed colors. Negative values provide
-alternative color scheme. 0 is no saturation at all.
-Saturation must be in [-10.0, 10.0] range.
-Default value is 1.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
31.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
-
-
-
31.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
31.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
-
-
-
31.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit works with audio input, split with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
31.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
-
-
-
31.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq
and azmq
work as a pass-through filters. zmq
-must be inserted between two video filters, azmq
between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq and azmq filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
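TARGET COMMAND [ARG]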
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
31.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
32 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
32.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
32.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod, so the numerical value may be suffixed by an IS
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
32.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
-
-
-
-
-
33 See Also# TOC
-
-
ffplay ,
-ffmpeg , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
34 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffplay.html b/Externals/ffmpeg/dev/doc/ffplay.html
deleted file mode 100644
index e072758fa8..0000000000
--- a/Externals/ffmpeg/dev/doc/ffplay.html
+++ /dev/null
@@ -1,745 +0,0 @@
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffplay [options] [input_file]
-
-
-
2 Description# TOC
-
-
FFplay is a very simple and portable media player using the FFmpeg
-libraries and the SDL library. It is mostly used as a testbed for the
-various FFmpeg APIs.
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3 contains the
-a:1 stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4 would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of the following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg, matching by metadata will only work properly for
-input files.
-
-
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr. If coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled by setting the environment variable
-AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced by setting
-the environment variable AV_LOG_FORCE_COLOR.
-The use of the environment variable NO_COLOR is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program-YYYYMMDD-HHMMSS.log in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose.
-
-Setting the environment variable FFREPORT to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p is expanded to the name
-of the program, %t is expanded to a timestamp, %% is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32 (alias for log level info):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--x width
-Force displayed width.
-
--y height
-Force displayed height.
-
--s size
-Set frame size (WxH or abbreviation), needed for videos which do
-not contain a header with the frame size like raw YUV. This option
-has been deprecated in favor of private options, try -video_size.
-
--fs
-Start in fullscreen mode.
-
--an
-Disable audio.
-
--vn
-Disable video.
-
--sn
-Disable subtitles.
-
--ss pos
-Seek to a given position in seconds.
-
--t duration
-Play <duration> seconds of audio/video.
-
--bytes
-Seek by bytes.
-
--nodisp
-Disable graphical display.
-
--f fmt
-Force format.
-
--window_title title
-Set window title (default is the input filename).
-
--loop number
-Loops movie playback <number> times. 0 means forever.
-
--showmode mode
-Set the show mode to use.
-Available values for mode are:
-
-‘0, video ’
-show video
-
-‘1, waves ’
-show audio waves
-
-‘2, rdft ’
-show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
-
-
-
-Default value is "video", if video is not present or cannot be played
-"rdft" is automatically selected.
-
-You can interactively cycle through the available show modes by
-pressing the key w .
-
-
--vf filtergraph
-Create the filtergraph specified by filtergraph and use it to
-filter the video stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single video input and a single video
-output. In the filtergraph, the input is associated to the label
-in, and the output to the label out. See the
-ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-You can specify this parameter multiple times and cycle through the specified
-filtergraphs along with the show modes by pressing the key w .
-
-
--af filtergraph
-filtergraph is a description of the filtergraph to apply to
-the input audio.
-Use the option "-filters" to show all the available filters (including
-sources and sinks).
-
-
--i input_file
-Read input_file .
-
-
-
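-As a quick illustration of how the options above combine (a hypothetical
-invocation; the input name and values are placeholders only):
-
ffplay -fs -ss 30 -t 60 -window_title "Preview" -vf "scale=640:-1" input.mp4
-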
-
-
3.5 Advanced options# TOC
-
--pix_fmt format
-Set pixel format.
-This option has been deprecated in favor of private options, try -pixel_format.
-
-
--stats
-Print several playback statistics, in particular show the stream
-duration, the codec parameters, the current position in the stream and
-the audio/video synchronisation drift. It is on by default, to
-explicitly disable it you need to specify -nostats
.
-
-
--fast
-Non-spec-compliant optimizations.
-
--genpts
-Generate pts.
-
--sync type
-Set the master clock to audio (type=audio), video
-(type=video) or external (type=ext). Default is audio. The
-master clock is used to control audio-video synchronization. Most media
-players use audio as master clock, but in some cases (streaming or high
-quality broadcast) it is necessary to change that. This option is mainly
-used for debugging purposes.
-
--ast audio_stream_specifier
-Select the desired audio stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" audio stream is selected in the program of the
-already selected video stream.
-
--vst video_stream_specifier
-Select the desired video stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" video stream is selected.
-
--sst subtitle_stream_specifier
-Select the desired subtitle stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" subtitle stream is selected in the program of the
-already selected video or audio stream.
-
--autoexit
-Exit when video is done playing.
-
--exitonkeydown
-Exit if any key is pressed.
-
--exitonmousedown
-Exit if any mouse button is pressed.
-
-
--codec:media_specifier codec_name
-Force a specific decoder implementation for the stream identified by
-media_specifier, which can assume the values a (audio),
-v (video), and s (subtitle).
-
-
--acodec codec_name
-Force a specific audio decoder.
-
-
--vcodec codec_name
-Force a specific video decoder.
-
-
--scodec codec_name
-Force a specific subtitle decoder.
-
-
--autorotate
-Automatically rotate the video according to presentation metadata. Enabled by
-default, use -noautorotate to disable it.
-
-
--framedrop
-Drop video frames if video is out of sync. Enabled by default if the master
-clock is not set to video. Use this option to enable frame dropping for all
-master clock sources, use -noframedrop to disable it.
-
-
--infbuf
-Do not limit the input buffer size, read as much data as possible from the
-input as soon as possible. Enabled by default for realtime streams, where data
-may be dropped if not read in time. Use this option to enable infinite buffers
-for all inputs, use -noinfbuf to disable it.
-
-
-
-
-
-
3.6 While playing# TOC
-
-
-q, ESC
-Quit.
-
-
-f
-Toggle full screen.
-
-
-p, SPC
-Pause.
-
-
-a
-Cycle audio channel in the current program.
-
-
-v
-Cycle video channel.
-
-
-t
-Cycle subtitle channel in the current program.
-
-
-c
-Cycle program.
-
-
-w
-Cycle video filters or show modes.
-
-
-s
-Step to the next frame.
-
-Pause if the stream is not already paused, step to the next video
-frame, and pause.
-
-
-left/right
-Seek backward/forward 10 seconds.
-
-
-down/up
-Seek backward/forward 1 minute.
-
-
-page down/page up
-Seek to the previous/next chapter, or if there are no chapters,
-seek backward/forward 10 minutes.
-
-
-mouse click
-Seek to percentage in file corresponding to fraction of width.
-
-
-
-
-
-
-
-
4 See Also# TOC
-
-
ffmpeg-all ,
-ffmpeg , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
5 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffprobe-all.html b/Externals/ffmpeg/dev/doc/ffprobe-all.html
deleted file mode 100644
index a52af3304f..0000000000
--- a/Externals/ffmpeg/dev/doc/ffprobe-all.html
+++ /dev/null
@@ -1,21676 +0,0 @@
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffprobe [options] [input_file]
-
-
-
2 Description# TOC
-
-
ffprobe gathers information from multimedia streams and prints it in
-human- and machine-readable fashion.
-
-
For example it can be used to check the format of the container used
-by a multimedia stream and the format and type of each media stream
-contained in it.
-
-
If a filename is specified in input, ffprobe will try to open and
-probe the file content. If the file cannot be opened or recognized as
-a multimedia file, a positive exit code is returned.
-
-
ffprobe may be employed either as a standalone application or in
-combination with a textual filter, which may perform more
-sophisticated processing, e.g. statistical processing or plotting.
-
-
Options are used to list some of the formats supported by ffprobe or
-for specifying which information to display, and for setting how
-ffprobe will show it.
-
-
ffprobe output is designed to be easily parsable by a textual filter,
-and consists of one or more sections of a form defined by the selected
-writer, which is specified by the print_format option.
-
-
Sections may contain other nested sections, and are identified by a
-name (which may be shared by other sections), and a unique
-name. See the output of sections .
-
-
Metadata tags stored in the container or in the streams are recognized
-and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
-section.
-
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of the following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--f format
-Force format to use.
-
-
--unit
-Show the unit of the displayed values.
-
-
--prefix
-Use SI prefixes for the displayed values.
-Unless the "-byte_binary_prefix" option is used all the prefixes
-are decimal.
-
-
--byte_binary_prefix
-Force the use of binary prefixes for byte values.
-
-
--sexagesimal
-Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
-
-
--pretty
-Prettify the format of the displayed values, it corresponds to the
-options "-unit -prefix -byte_binary_prefix -sexagesimal".
-
-
--of, -print_format writer_name [=writer_options ]
-Set the output printing format.
-
-writer_name specifies the name of the writer, and
-writer_options specifies the options to be passed to the writer.
-
-For example for printing the output in JSON format, specify:
-
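-print_format json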
-
-For more details on the available output printing formats, see the
-Writers section below.
-
-
--sections
-Print sections structure and section information, and exit. The output
-is not meant to be parsed by a machine.
-
-
--select_streams stream_specifier
-Select only the streams specified by stream_specifier . This
-option affects only the options related to streams
-(e.g. show_streams
, show_packets
, etc.).
-
-For example to show only audio streams, you can use the command:
-
-
ffprobe -show_streams -select_streams a INPUT
-
-
-To show only video packets belonging to the video stream with index 1:
-
-
ffprobe -show_packets -select_streams v:1 INPUT
-
-
-
--show_data
-Show payload data, as a hexadecimal and ASCII dump. Coupled with
--show_packets , it will dump the packets’ data. Coupled with
--show_streams , it will dump the codec extradata.
-
-The dump is printed as the "data" field. It may contain newlines.
-
-
--show_data_hash algorithm
-Show a hash of payload data, for packets with -show_packets and for
-codec extradata with -show_streams .
-
-
--show_error
-Show information about the error found when trying to probe the input.
-
-The error information is printed within a section with name "ERROR".
-
-
--show_format
-Show information about the container format of the input multimedia
-stream.
-
-All the container format information is printed within a section with
-name "FORMAT".
-
-
--show_format_entry name
-Like -show_format , but only prints the specified entry of the
-container format information, rather than all. This option may be given more
-than once, then all specified entries will be shown.
-
-This option is deprecated, use show_entries instead.
-
-
--show_entries section_entries
-Set list of entries to show.
-
-Entries are specified according to the following
-syntax. section_entries contains a list of section entries
-separated by :. Each section entry is composed of a section
-name (or unique name), optionally followed by a list of entries local
-to that section, separated by ,.
-
-If a section name is specified but is followed by no =, all
-entries are printed to output, together with all the contained
-sections. Otherwise only the entries specified in the local section
-entries list are printed. In particular, if = is specified but
-the list of local entries is empty, then no entries will be shown for
-that section.
-
-Note that the order of specification of the local section entries is
-not honored in the output, and the usual display order will be
-retained.
-
-The formal syntax is given by:
-
-
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME [,LOCAL_SECTION_ENTRIES ]
-SECTION_ENTRY ::= SECTION_NAME [=[LOCAL_SECTION_ENTRIES ]]
-SECTION_ENTRIES ::= SECTION_ENTRY [:SECTION_ENTRIES ]
-
-
-For example, to show only the index and type of each stream, and the PTS
-time, duration time, and stream index of the packets, you can specify
-the argument:
-
-
packet=pts_time,duration_time,stream_index : stream=index,codec_type
-
-
-To show all the entries in the section "format", but only the codec
-type in the section "stream", specify the argument:
-
-
format : stream=codec_type
-
-
-To show all the tags in the stream and format sections:
-
-
stream_tags : format_tags
-
-
- To show only the title tag (if available) in the stream
-sections:
-
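stream_tags=title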
-
-
--show_packets
-Show information about each packet contained in the input multimedia
-stream.
-
-The information for each single packet is printed within a dedicated
-section with name "PACKET".
-
-
--show_frames
-Show information about each frame and subtitle contained in the input
-multimedia stream.
-
-The information for each single frame is printed within a dedicated
-section with name "FRAME" or "SUBTITLE".
-
-
--show_streams
-Show information about each media stream contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "STREAM".
-
-
--show_programs
-Show information about programs and their streams contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "PROGRAM_STREAM".
-
-
--show_chapters
-Show information about chapters stored in the format.
-
-Each chapter is printed within a dedicated section with name "CHAPTER".
-
-
--count_frames
-Count the number of frames per stream and report it in the
-corresponding stream section.
-
-
--count_packets
-Count the number of packets per stream and report it in the
-corresponding stream section.
-
-
--read_intervals read_intervals
-
-Read only the specified intervals. read_intervals must be a
-sequence of interval specifications separated by ",".
-ffprobe
will seek to the interval starting point, and will
-continue reading from that.
-
-Each interval is specified by two optional parts, separated by "%".
-
-The first part specifies the interval start position. It is
-interpreted as an absolute position, or as a relative offset from the
-current position if it is preceded by the "+" character. If this first
-part is not specified, no seeking will be performed when reading this
-interval.
-
-The second part specifies the interval end position. It is interpreted
-as an absolute position, or as a relative offset from the current
-position if it is preceded by the "+" character. If the offset
-specification starts with "#", it is interpreted as the number of
-packets to read (not including the flushing packets) from the interval
-start. If no second part is specified, the program will read until the
-end of the input.
-
-Note that seeking is not accurate, thus the actual interval start
-point may be different from the specified position. Also, when an
-interval duration is specified, the absolute end time will be computed
-by adding the duration to the interval start point found by seeking
-the file, rather than to the specified start value.
-
-The formal syntax is given by:
-
-
INTERVAL ::= [START |+START_OFFSET ][%[END |+END_OFFSET ]]
-INTERVALS ::= INTERVAL [,INTERVALS ]
-
-
-A few examples follow.
-
- Seek to time 10, read packets until 20 seconds after the found seek
-point, then seek to position 01:30 (1 minute and thirty
-seconds) and read packets until position 01:45.
-
-
- Read only 42 packets after seeking to position 01:23:
-
-
- Read only the first 20 seconds from the start:
-
-
- Read from the start until position 02:30:
-
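-A minimal sketch of the interval specifications for the cases above, in
-order, assuming the INTERVAL syntax given above:
-
10%+20,01:30%01:45
01:23%+#42
%+20
%02:30
-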
-
-
-
--show_private_data, -private
-Show private data, that is data depending on the format of the
-particular shown element.
-This option is enabled by default, but you may need to disable it
-for specific uses, for example when creating XSD-compliant XML output.
-
-
--show_program_version
-Show information related to program version.
-
-Version information is printed within a section with name
-"PROGRAM_VERSION".
-
-
--show_library_versions
-Show information related to library versions.
-
-Version information for each library is printed within a section with
-name "LIBRARY_VERSION".
-
-
--show_versions
-Show information related to program and library versions. This is the
-equivalent of setting both -show_program_version and
--show_library_versions options.
-
-
--show_pixel_formats
-Show information about all pixel formats supported by FFmpeg.
-
-Pixel format information for each format is printed within a section
-with name "PIXEL_FORMAT".
-
-
--bitexact
-Force bitexact output, useful to produce output which is not dependent
-on the specific build.
-
-
--i input_file
-Read input_file .
-
-
-
-
-
-
4 Writers# TOC
-
-
A writer defines the output format adopted by ffprobe, and will be
-used for printing all the parts of the output.
-
-
A writer may accept one or more arguments, which specify the options
-to adopt. The options are specified as a list of key =value
-pairs, separated by ":".
-
-
All writers support the following options:
-
-
-string_validation, sv
-Set string validation mode.
-
-The following values are accepted.
-
-‘fail ’
-The writer will fail immediately in case an invalid string (UTF-8)
-sequence or code point is found in the input. This is especially
-useful to validate input metadata.
-
-
-‘ignore ’
-Any validation error will be ignored. This will result in possibly
-broken output, especially with the json or xml writer.
-
-
-‘replace ’
-The writer will substitute invalid UTF-8 sequences or code points with
-the string specified with the string_validation_replacement .
-
-
-
-Default value is ‘replace ’.
-
-
-string_validation_replacement, svr
-Set replacement string to use in case string_validation is
-set to ‘replace ’.
-
-In case the option is not specified, the writer will assume the empty
-string, that is it will remove the invalid sequences from the input
-strings.
-
-
-
-
A description of the currently available writers follows.
-
-
-
4.1 default# TOC
-
Default format.
-
-
Print each section in the form:
-
-
[SECTION]
-key1=val1
-...
-keyN=valN
-[/SECTION]
-
-
-
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
-PROGRAM_STREAM section, and are prefixed by the string "TAG:".
-
-
A description of the accepted options follows.
-
-
-nokey, nk
-If set to 1 specify not to print the key of each field. Default value
-is 0.
-
-
-noprint_wrappers, nw
-If set to 1 specify not to print the section header and footer.
-Default value is 0.
-
-
-
-
-
4.2 compact, csv# TOC
-
Compact and CSV format.
-
-
The csv writer is equivalent to compact, but supports
-different defaults.
-
-
Each section is printed on a single line.
-If no option is specified, the output has the form:
-
-
section|key1=val1| ... |keyN=valN
-
-
-
Metadata tags are printed in the corresponding "format" or "stream"
-section. A metadata tag key, if printed, is prefixed by the string
-"tag:".
-
-
The description of the accepted options follows.
-
-
-item_sep, s
-Specify the character to use for separating fields in the output line.
-It must be a single printable character; it is "|" by default ("," for
-the csv writer).
-
-
-nokey, nk
-If set to 1 specify not to print the key of each field. Its default
-value is 0 (1 for the csv writer).
-
-
-escape, e
-Set the escape mode to use, defaulting to "c" ("csv" for the csv
-writer).
-
-It can assume one of the following values:
-
-c
-Perform C-like escaping. Strings containing a newline (’\n’), a carriage
-return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping
-character (’\’) or the item separator character SEP are escaped using
-C-like escaping, so that a newline is converted to the sequence "\n", a
-carriage return to "\r", ’\’ to "\\" and the separator SEP is
-converted to "\SEP".
-
-
-csv
-Perform CSV-like escaping, as described in RFC4180. Strings
-containing a newline (’\n’), a carriage return (’\r’), a double quote
-(’"’), or SEP are enclosed in double-quotes.
-
-
-none
-Perform no escaping.
-
-
-
-
-print_section, p
-Print the section name at the beginning of each line if the value is
-1, disable it with value set to 0. Default value is 1.
-
-
-
-
-
-
4.3 flat# TOC
-
Flat format.
-
-
A free-form output where each line contains an explicit key=value, such as
-"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
-directly embedded in sh scripts as long as the separator character is an
-alphanumeric character or an underscore (see sep_char option).
-
-
The description of the accepted options follows.
-
-
-sep_char, s
-Separator character used to separate the chapter, the section name, IDs and
-potential tags in the printed field key.
-
-Default value is ’.’.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
-
4.4 ini# TOC
-
INI format output.
-
-
Print output in an INI based format.
-
-
The following conventions are adopted:
-
-
- all key and values are UTF-8
- ’.’ is the subgroup separator
- newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped
- ’\’ is the escape character
- ’#’ is the comment indicator
- ’=’ is the key/value separator
- ’:’ is not used but usually parsed as key/value separator
-
-
-
This writer accepts options as a list of key =value pairs,
-separated by ":".
-
-
The description of the accepted options follows.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
4.5 json# TOC
-
JSON based format.
-
-
Each section is printed using JSON notation.
-
-
The description of the accepted options follows.
-
-
-compact, c
-If set to 1 enable compact output, that is each section will be
-printed on a single line. Default value is 0.
-
-
-
-
For more information about JSON, see http://www.json.org/ .
-
-
-
-
4.6 xml# TOC
-
XML based format.
-
-
The XML output is described in the XML schema description file
-ffprobe.xsd installed in the FFmpeg datadir.
-
-
An updated version of the schema can be retrieved at the url
-http://www.ffmpeg.org/schema/ffprobe.xsd , which redirects to the
-latest schema committed into the FFmpeg development source code tree.
-
-
Note that the output issued will be compliant to the
-ffprobe.xsd schema only when no special global output options
-(unit , prefix , byte_binary_prefix ,
-sexagesimal etc.) are specified.
-
-
The description of the accepted options follows.
-
-
-fully_qualified, q
-If set to 1 specify if the output should be fully qualified. Default
-value is 0.
-This is required for generating an XML file which can be validated
-through an XSD file.
-
-
-xsd_compliant, x
-If set to 1 perform more checks for ensuring that the output is XSD
-compliant. Default value is 0.
-This option automatically sets fully_qualified to 1.
-
-
-
-
For more information about the XML format, see
-http://www.w3.org/XML/ .
-
-
-
5 Timecode# TOC
-
-
ffprobe supports Timecode extraction:
-
-
- MPEG1/2 timecode is extracted from the GOP, and is available in the video
-stream details (-show_streams , see timecode ).
-
- MOV timecode is extracted from tmcd track, so is available in the tmcd
-stream metadata (-show_streams , see TAG:timecode ).
-
- DV, GXF and AVI timecodes are available in format metadata
-(-show_format , see TAG:timecode ).
-
-
-
-
-
6 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
6.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- ' and \ are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between '' are included literally in the
-parsed string. The quote character ' itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
6.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour
containing the '
special
-character:
Crime d\'Amour
-
- The string above contains a quote, so the '
needs to be escaped
-when quoting it:
'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
6.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
6.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
[-][HH:]MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
-
[-]S+[.m...]
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
6.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
-
-
6.4 Video size# TOC
-
Specify the size of the sourced video; it may be a string of the form
-widthxheight, or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
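-As a hedged illustration (input.mp4 and output.mp4 are placeholder
-names), a size abbreviation can be used wherever a video size is
-expected, for example with the -s option:
-
ffmpeg -i input.mp4 -s hd720 output.mp4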
-
-
-
6.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
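-For instance (a sketch with placeholder file names), a frame rate
-abbreviation can be passed to the -r option:
-
ffmpeg -i input.mp4 -r ntsc-film output.mp4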
-
-
-
6.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
6.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed by "0x" followed by an
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
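-As a hedged example, a color name with an alpha component can be passed
-to the lavfi color source (shown with ffplay, assuming lavfi is
-available in the build):
-
ffplay -f lavfi -i color=c=red@0.5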
-
-
-
6.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1(back) ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout
)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_*
macros in libavutil/channel_layout.h).
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
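-For example (a sketch, assuming the lavfi device is available), a
-standard layout name can be passed to the anullsrc source:
-
ffplay -f lavfi -i anullsrc=channel_layout=5.1:sample_rate=48000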
-
-
7 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +
, -
,
-*
, /
, ^
.
-
-
The following unary operators are available: +
, -
.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and lesser than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is lesser than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is lesser than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated to y ; it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note: variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
For example the construct:
if (A AND B) then C
-
is equivalent to:
if(A*B, C)
-
-
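-As a small hedged illustration (file names are placeholders), these
-functions can be combined in filter expressions, for example selecting
-one frame out of every ten with the select filter (note the escaped
-comma inside the expression):
-
ffmpeg -i input.mp4 -vf "select='not(mod(n\,10))'" -vsync vfr output.mp4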
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
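-For example (a hedged sketch with placeholder file names), these
-prefixes are what make option values such as "2M" or "128k" work:
-
ffmpeg -i input.mp4 -b:v 2M -maxrate 2.5M -bufsize 4M -b:a 128k output.mp4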
-
-
-
-
8 OpenCL Options# TOC
-
-
When FFmpeg is configured with --enable-opencl
, it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-
-
-
-
9 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext
options or using the libavutil/opt.h API
-for programmatic use.
-
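-As an illustrative sketch (placeholder file names), a few of the
-generic options listed below can be set on the command line like this:
-
ffmpeg -i input.mp4 -c:v mpeg4 -b:v 1M -g 250 -bf 2 output.avi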
-
-The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vector by macroblock (mpeg4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate
and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be included between -1 and
-69, default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be included between -1 and
-1024, default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not auto-detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in broken Microsoft decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, more strict version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk, do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non-compliances as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
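-As a hedged example of err_detect (input.mp4 is a placeholder), strict
-error detection can be combined with the null muxer to verify a file:
-
ffmpeg -err_detect +crccheck+explode -i input.mp4 -f null -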
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use elsewise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set limit motion vectors range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non spec compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identically to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames excepts keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
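-For instance (a sketch with placeholder names), decoding only keyframes
-with skip_frame is a common way to extract thumbnails quickly:
-
ffmpeg -skip_frame nokey -i input.mp4 -vsync 0 thumb_%04d.png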
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non drop frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none
.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-Stream parameters.
-For example to separate the fields with newlines and indention:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
10 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders
and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER
/
---disable-decoder=DECODER
.
-
-
The option -decoders
of the ff* tools will display the list of
-enabled decoders.
-
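-As a hedged example, a minimal build that keeps only a couple of
-decoders might be configured along these lines:
-
./configure --disable-decoders --enable-decoder=h264 --enable-decoder=aac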
-
-
-
11 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
11.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
11.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
-
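-As an illustrative sketch (input.yuv is a placeholder for a raw YUV
-4:2:0 file), raw video is typically decoded by also telling the
-rawvideo demuxer its geometry, pixel format and frame rate:
-
ffmpeg -f rawvideo -pixel_format yuv420p -video_size 640x480 -framerate 25 -i input.yuv output.mp4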
-
-
-
-
12 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
12.1 ac3# TOC
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
12.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
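-For example (a sketch, file names are placeholders), dynamic range
-compression can be disabled while decoding:
-
ffmpeg -drc_scale 0 -i input.ac3 output.wav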
-
-
-
-
-
12.2 ffwavesynth# TOC
-
-
-Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
12.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt
.
-
-
-
12.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm
.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
12.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc
.
-
-
-
12.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
12.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb
.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
12.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb
.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
12.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
13 Subtitles Decoders# TOC
-
-
-
13.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
13.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0
.
-
-
-
-
-
13.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi
.
-
-
-
13.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text, you should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext based subtitles if
-your application can handle simple text based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext based subtitles where empty spaces may
-be present at the start or at the end of the lines or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
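-As a hedged sketch (assuming a build configured with --enable-libzvbi
-and a transport stream carrying teletext subtitles on page 888), the
-pages might be converted to SubRip text with something like:
-
ffmpeg -txt_format text -txt_page 888 -i input.ts -c:s srt output.srt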
-
-
-
14 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs
.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs
, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF
, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF
.
-
-
The option -bsfs
of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
14.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
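-For example (a sketch with placeholder file names), an ADTS AAC stream
-can be put into an MP4 container without re-encoding:
-
ffmpeg -i input.aac -c:a copy -bsf:a aac_adtstoasc output.mp4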
-
-
14.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
14.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified it is assumed ‘k ’.
-
-
For example the following ffmpeg
command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264
encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
14.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg
, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
14.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
14.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
14.7 mjpega_dump_header# TOC
-
-
-
14.8 movsub# TOC
-
-
-
14.9 mp3_header_decompress# TOC
-
-
-
14.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeral string, whose value is related to how often output bytes will
-be modified. Values less than or equal to 0 are forbidden; the lower the
-value, the more frequently bytes are modified, with 1 meaning that
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
14.11 remove_extra# TOC
-
-
-
15 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value allows more information to be
-detected in case it is dispersed into the stream, but increases
-latency. Must be an integer not less than 32. It is 5000000 by default.
-A combined example appears after this options list.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value allows more accurate information to be detected, but
-increases latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non compliancies as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitles desynching and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set the number of bytes to skip before reading the header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
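-As an illustrative sketch (input and output file names hypothetical), several
-of the generic options above can be combined on the command line; note that
-input options must be placed before the corresponding -i:
-
ffmpeg -probesize 10000000 -analyzeduration 10000000 -fflags +genpts -i input.ts -c copy output.mkv
-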
-
-
15.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
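-As an illustration (input file name hypothetical), ffprobe
-accepts such a specifier through its -select_streams option, here selecting
-the first audio stream:
-
ffprobe -show_streams -select_streams a:0 input.mp4
-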
-
-
16 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
16.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
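-For example (the URL is hypothetical), such a playlist can be opened directly
-with ffplay, and the variant streams toggled with the ’a’ and ’v’ keys as
-described above:
-
ffplay http://example.com/stream/playlist.m3u8
-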
-
-
16.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers, but the PNG signature, up to (but not including) the first
-fcTL chunk are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
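-For example (file name hypothetical), to play an APNG file while overriding
-its loop setting:
-
ffplay -ignore_loop 0 input.apng
-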
-
-
16.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
16.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packet had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
16.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was set to its default value of -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order-mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the stream with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
-
-
16.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
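-As a minimal sketch (file names hypothetical), a script list.ffconcat
-containing:
-
ffconcat version 1.0
file part1.mkv
file part2.mkv
-
-can then be stream copied into a single output with:
-
ffmpeg -f concat -i list.ffconcat -c copy output.mkv
-
-If the listed paths are not "safe" as defined above, -safe 0 may need to be
-added before the -i option.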
-
-
16.5 flv# TOC
-
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
16.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-tracks meta data entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
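-For example (file name and track number hypothetical), to play the second
-track of a game-music file:
-
ffplay -track_index 1 music.spc
-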
-
-
16.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
16.8 gif# TOC
-
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
16.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%0Nd" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not guaranteed: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
16.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
16.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
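-For example (file names hypothetical), to remux a transport stream while
-leaving the teletext timestamps untouched:
-
ffmpeg -fix_teletext_pts 0 -i input.ts -c copy output.ts
-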
-
-
-
16.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
16.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG
-script looks like that:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
16.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
17 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
An ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
18 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "–list-protocols".
-
-
You can disable all the protocols using the configure option
-"–disable-protocols", and selectively enable a protocol using the
-option "–enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"–disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
18.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
-
bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
18.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to temporary file. It brings seeking capability to live streams.
-
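-For example (the URL is hypothetical), seeking can be added to an HTTP stream
-in ffplay by prefixing the URL with the protocol name:
-
ffplay cache:http://example.com/live/stream.ts
-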
-
-
-
-
18.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
Allows reading and seeking from many resources in sequence as if they were
-a single resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
where URL1 , URL2 , ..., URLN are the urls of the
-resource to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
18.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
-
-
18.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
18.6 file# TOC
-
-
File access protocol.
-
-
Allows reading from or writing to a file.
-
-
A file URL can have the form:
-
file:filename
-
where filename is the path of the file to read.
-
-
A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow medium.
-
-
-
-
-
18.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
-Allows reading from or writing to remote resources using the FTP protocol.
-
-
The following syntax is required.
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
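-For example (server and path hypothetical), to read a remote file over FTP
-with ffmpeg:
-
ffmpeg -i ftp://user:password@ftp.example.com/videos/input.mpeg output.mp4
-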
-
-
18.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
18.9 hls# TOC
-
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
18.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
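-For example (URL and header value hypothetical), protocol options such as the
-User-Agent override can be passed on the ffplay command line:
-
ffplay -user_agent "FooPlayer/1.0" http://example.com/stream.mp4
-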
-
-
18.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
18.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, which do not support the
-HTTP PUT method but only the SOURCE method.
-
-
-
-
-
-
icecast://[username [:password ]@]server :port /mountpoint
-
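-As a sketch (server, mountpoint and credentials hypothetical; the "source"
-user name is a common Icecast default, not something mandated by this
-protocol), an MP3 stream could be published with:
-
ffmpeg -re -i input.mp3 -c:a copy -f mp3 icecast://source:hackme@icecast.example.com:8000/stream
-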
-
-
-
18.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
18.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server [:port ][/app ][/playpath ]
-
-
-
-
18.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
18.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
-Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
pipe:[number]
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example to read from stdin with ffmpeg
:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg
:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV), require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
18.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any
, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live
and
-recorded
.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example to read with ffplay
a multimedia resource named
-"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
18.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
18.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
18.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
18.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
18.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
18.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
-The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
-
For more information see: http://www.samba.org/ .
-
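-For example (server, share and credentials hypothetical), to play a file from
-an SMB share:
-
ffplay smb://WORKGROUP:user:password@fileserver/share/videos/clip.mpg
-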
-
-
18.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
-Allows reading from or writing to remote resources using the SFTP protocol.
-
-
-The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing the private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
18.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"–enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto ://server [:port ][/app ][/playpath ] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using
-ffmpeg
:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay
:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
18.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname [:port ][?option =val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n '
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
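-As a sketch (destination address and port hypothetical), an input can be sent
-as MPEG-TS over RTP to a multicast group, using one of the URL options listed
-above:
-
ffmpeg -re -i input.mp4 -c copy -f rtp_mpegts rtp://239.255.0.1:5004?ttl=16
-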
-
-
18.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat, it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for a RTSP url is:
-
-
rtsp://hostname [:port ]/path
-
-
-
Options can be set on the ffmpeg
/ffplay
command
-line, or set in code via AVOption
s or in
-avformat_open_input
.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay
, the
-streams to display can be chosen with -vst
n and
--ast
n for video and audio respectively, and can be switched
-on the fly by pressing v
and a
.
-
-
-
18.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay
and
-ffmpeg
tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
18.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat, it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
18.27.1 Muxer# TOC
-
-
The syntax for a SAP url given to the muxer is:
-
-
sap://destination [:port ][?options ]
-
-
-
The RTP packets are sent to destination on port port ,
-or to port 5004 if no port is specified.
-options is a &
-separated list. The following options
-are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay
:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay
, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
18.27.2 Demuxer# TOC
-
-
The syntax for a SAP url given to the demuxer is:
-
-
sap://[address ][:port ]
-
-
-
address is the multicast address to listen for announcements on,
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port that is listened on, 9875 if omitted.
-
-
The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
ffplay sap://
-
-
To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
18.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host :port [?options ]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. Outgoing connection is done by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
-
-
18.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
-
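-As a sketch (host, port and key material hypothetical, and assuming the SRTP
-output options can be combined with the RTP muxer in this way), an encrypted
-RTP output could look like:
-
ffmpeg -re -i input -c copy -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params <base64-key-and-salt> srtp://host:port
-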
-
-
18.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
18.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP url is:
-
-
tcp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to setup a listening TCP connection
-with ffmpeg
, which is then accessed with ffplay
:
-
-
ffmpeg -i input -f format tcp://hostname :port ?listen
-ffplay tcp://hostname :port
-
-
-
-
18.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL url is:
-
-
tls://hostname :port [?options ]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay
:
-
-
-
ffplay tls://hostname :port
-
-
-
-
18.33 udp# TOC
-
-
User Datagram Protocol.
-
-
-The required syntax for a UDP URL is:
-
-
udp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set the timeout for raising an error, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks with
-broadcast storm protection.
-
-
-
-
-
18.33.1 Examples# TOC
-
-
- Use ffmpeg
to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname :port
-
-
- Use ffmpeg
to stream in mpegts format over UDP using 188-byte
-UDP packets and a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg
to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address]:port ...
-
-
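- Use ffmpeg
to receive only from a given multicast sender, with a larger receive
-circular buffer to better tolerate bursts. This is a sketch combining the
-sources, fifo_size and overrun_nonfatal options described above; the
-addresses, port and output filename are placeholders:
-
-
ffmpeg -i "udp://239.0.0.1:1234?sources=192.168.1.10&fifo_size=100000&overrun_nonfatal=1" output.ts
-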
-
-
-
18.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
-
unix://filepath
-
-
The following parameters can be set via command line options
-(or in code via AVOptions):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
-
-
-
19 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
-
-
20 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow you to access
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-
You can disable all the input devices using the configure option
-"--disable-indevs", and selectively enable an input device using the
-option "--enable-indev=INDEV", or you can disable a particular
-input device using the option "--disable-indev=INDEV".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
20.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD[,DEV[,SUBDEV]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD, DEV, SUBDEV)
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example, to capture with ffmpeg from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
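Similarly, a sketch for capturing a short clip from the ALSA default
-device (the device name default and the 10 second limit are only
-illustrative values):
-
-
ffmpeg -f alsa -i default -t 10 alsa-default.wav
-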
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
20.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the framework currently recommended by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the latter selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-
- -video_device_index <INDEX>
-
-and/or
-
- -audio_device_index <INDEX>
-
-, overriding any
-device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
20.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
20.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
20.3 bktr# TOC
-
-
BSD video input device.
-
-
-
20.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronization between them.
-
-
The input name should be in the format:
-
-
TYPE=NAME[:TYPE=NAME]
-
-
where TYPE can be either audio or video,
-and NAME is the device’s name.
-
-
-
20.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
20.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
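- Capture from the video device Camera using an explicit size and frame
-rate. This is a sketch combining the device options described above with a
-device name taken from the examples; the size, rate and output filename are
-illustrative values only:
-
-
$ ffmpeg -f dshow -video_size 640x480 -framerate 30 -i video="Camera" out.avi
-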
-
-
-
-
20.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
20.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
20.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
-
desktop
-
or
-
title=window_title
-
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
20.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0
to
-not draw the pointer. Default value is 1
.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
20.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
20.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
20.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
-
-
-
-
-
-
20.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name:input_N, where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
20.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
20.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
20.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
20.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example, to copy with ffmpeg the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
20.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
20.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
20.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
20.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string '' as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture;
-try the latest OpenAL Soft if the above does not work.
-
-
-
20.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example, to grab from /dev/dsp using ffmpeg, use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
20.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default".
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
20.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the sample rate in Hz; by default 48kHz is used.
-
-
-channels
-Specify the number of channels in use; by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame; by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio; it affects the
-audio latency. By default it is unset.
-
-
-
-
-
20.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
-
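Record a short mono clip at 44.1 kHz using the options described above
-(a sketch; the duration and output filename are illustrative values):
-
-
ffmpeg -f pulse -sample_rate 44100 -channels 1 -i default -t 10 /tmp/pulse-mono.wav
-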
-
-
20.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
20.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example, to grab from /dev/audio0 using ffmpeg, use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
20.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node. Linux
-systems usually create such nodes automatically when the device
-(e.g. a USB webcam) is plugged into the system; the node has a name of the
-kind /dev/videoN, where N is a number associated with
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-widthxheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
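For instance, assuming the device node /dev/video0, the supported formats
-can be listed with:
-
-
ffmpeg -f video4linux2 -list_formats all -i /dev/video0
-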
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
20.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Defaults to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTHxHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
20.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
-
20.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname]:display_number.screen_number[+x_offset,y_offset]
-
-
-
hostname:display_number.screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo
program to get basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
20.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0 specifies
-not to draw the pointer. Default value is 1.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered
or a number of pixels PIXELS .
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of the region; otherwise, the region
-follows only when the mouse pointer comes within PIXELS (greater than
-zero) of the edge of the region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer reaches within 100 pixels to edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga
.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1
.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
20.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. The pixel format is always
-uyvy422; frame rate and video size must be determined for your device with
--list_formats 1. The audio sample rate is always 48 kHz and the number
-of channels is currently limited to 2 (stereo).
-
-
-
20.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
20.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
21 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in decibels, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in decibels, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set the LFE mix into non-LFE level. It is used when there is an LFE input but no
-LFE output. It is a value expressed in decibels, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
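As a usage sketch of the options above through the aresample filter
-(assuming an FFmpeg build with libsoxr; the filenames are placeholders):
-
-
ffmpeg -i input.wav -af aresample=out_sample_rate=48000:resampler=soxr:precision=28 output.wav
-
-
This resamples the audio to 48 kHz with the SoX engine at its
-"Very High Quality" precision setting.
-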
-
-
-
22 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific of
-some scaling algorithms and ignored by others. The specified values
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
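As a usage sketch in the FFmpeg tools (the filenames and target size are
-placeholders), several of the flags above can be combined with +:
-
-
ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd+full_chroma_int output.mp4
-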
-
-
-
-
-
23 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
                [main]
-input --> split ---------------------> overlay --> output
-            |                             ^
-            |[tmp]                  [flip]|
-            +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split, labelled as
-[tmp], is processed through the crop filter, which crops
-away the lower half of the video, and is then vertically flipped. The
-overlay filter takes as input the first, unchanged output of the
-split filter (which was labelled as [main]), and overlays on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take a list of parameters as input: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
24 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
-
graph2dot -h
-
to see how to use graph2dot.
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
25 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
25.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of values. In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade filter
-declares three options in this order – type, start_frame and
-nb_frames. Then the parameter list in:0:30 means that the value
-in is assigned to the option type, 0 to
-start_frame and 30 to nb_frames.
-
- A ’:’-separated list of mixed direct values and long key=value
-pairs. The direct values must precede the key=value pairs, and
-follow the same order constraints as in the previous point. The following
-key=value pairs can be set in any preferred order (see the sketch after
-this list for equivalent forms of the fade example).
-
-
-
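To make the forms above concrete, the fade example can be written,
-equivalently, in any of the following ways (a sketch using only the
-options named above):
-
-
fade=in:0:30
-fade=in:0:nb_frames=30
-fade=type=in:start_frame=0:nb_frames=30
-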
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME             ::= sequence of alphanumeric characters and '_'
-LINKLABEL        ::= "[" NAME "]"
-LINKLABELS       ::= LINKLABEL [LINKLABELS]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER           ::= [LINKLABELS] NAME ["=" FILTER_ARGUMENTS] [LINKLABELS]
-FILTERCHAIN      ::= FILTER [,FILTERCHAIN]
-FILTERGRAPH      ::= [sws_flags=flags;] FILTERCHAIN [;FILTERGRAPH]
-
-
-
-
25.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to
-separate values, or one of the escaping characters \'
.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \'
or the special
-characters [],;
used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the '
special escaping character, and the
-:
special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \' escaping special characters,
-the , character also needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\
is special and needs to be escaped with another \
, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
26 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
The enable option follows the same rules as any other
-filtering option.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
27 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
27.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channels are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If the number of given delays is
-smaller than the number of channels, all remaining channels will not be delayed.
-
-
-
-
-
27.1.1 Examples# TOC
-
-
- Delay the first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
-
adelay=1500|0|500
-
-
-
27.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
27.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
-
- If the delay is very short, then it sounds like a (metallic) robot playing music:
-
-
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
27.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
27.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert the phase of the second channel:
-
-
aeval=val(0)|-val(1)
-
-
-
-
27.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in
for fade-in, or
-out
for a fade-out effect. Default is in
.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
27.4.1 Examples# TOC
-
-
- Fade in first 15 seconds of audio:
-
-
afade=t=in:ss=0:d=15
-
-
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
27.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
27.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
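-
-For example, one possible invocation (values chosen purely for illustration,
-using only the options listed above) centres the all-pass at 750 Hz with a
-100 Hz band-width:
-
-
allpass=frequency=750:width_type=h:width=100
-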
-
-
-
27.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
On the other hand, if both input are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate, and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
27.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
27.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
27.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
27.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
-This can be used together with ffmpeg -shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
27.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
-
apad=pad_len=1024
-
-
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
-
apad=whole_len=10000
-
-
- Use ffmpeg to pad the audio input with silence, so that the
-video stream will always be the shortest and will be converted
-in full to the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
27.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
-A phaser filter creates a series of peaks and troughs in the frequency spectrum.
-The positions of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74.
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
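-
-For example, the following spells out the documented defaults explicitly and
-selects the triangular modulation (an illustrative invocation, not a preset
-taken from elsewhere):
-
-
aphaser=in_gain=0.4:out_gain=0.74:delay=3:decay=0.4:speed=0.5:type=t
-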
-
-
27.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
-This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps, or to inject silence / cut out audio to make it match the
-timestamps, to do a combination of both, or to do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
27.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
-
aresample=44100
-
-
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
-
aresample=async=1000
-
-
-
-
27.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel.
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
27.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
-
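-
-For example, to retag 44100 Hz input as 48000 Hz, which plays it back faster
-and at a higher pitch (the rate is an illustrative choice):
-
-
asetrate=48000
-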
-
-
27.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
-
27.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10].
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
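-
-For example, to print the statistics for a file without writing any output
-(input.wav is only a placeholder name):
-
-
ffmpeg -i input.wav -af astats -f null -
-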
-
-
27.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
27.17.1 Examples# TOC
-
-
-Stress-test amerge by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
27.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
27.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
27.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
-
atempo=0.8
-
-
- Speed up audio to 125% tempo:
-
-
atempo=1.25
-
-
-
-
27.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
27.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
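-
-For example, an illustrative band-pass centred at 1000 Hz with a Q of 1,
-built only from the options listed above:
-
-
bandpass=f=1000:width_type=q:w=1
-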
-
-
-
27.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
27.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
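-
-For example, a mild illustrative boost of 6 dB below roughly 110 Hz (values
-chosen for demonstration only):
-
-
bass=g=6:f=110
-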
-
-
-
27.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
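-
-As a minimal illustration of the coefficient syntax, the following is an
-identity filter that leaves the audio unchanged:
-
-
biquad=b0=1:b1=0:b2=0:a0=1:a1=0:a2=0
-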
-
-
27.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
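-
-For example, to select the Chu Moy crossfeed preset (any of the profiles
-listed above could be substituted; this one is purely illustrative):
-
-
bs2b=profile=cmoy
-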
-
-
-
27.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel -out_channel
or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
27.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
27.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|.... or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0 is assumed but
-may be overridden (by 0/out-dBn). Typical values for the transfer
-function are -70/-70|-60/-20.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
27.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
27.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
27.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
27.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
27.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
-
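-
-For example, an illustrative, fairly pronounced flange using only the options
-documented above (all values are within the stated ranges):
-
-
flanger=delay=10:depth=5:regen=10:speed=2
-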
-
-
27.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
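-
-For example, an illustrative single-pole high-pass that removes content below
-roughly 100 Hz:
-
-
highpass=f=100:poles=1
-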
-
-
27.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
27.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/, /usr/local/lib/ladspa/,
-/usr/lib/ladspa/.
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
27.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
-
ladspa=file=amp
-
-
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit (CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome from the
-C* Audio Plugin Suite (CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
27.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and prior one is kept.
-
-
-
-
-
27.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
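-
-For example, an illustrative low-pass with its 3dB point at 3000 Hz:
-
-
lowpass=f=3000
-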
-
-
-
27.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
27.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
-Note that ffmpeg integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
27.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- only one input per channel output,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
pan="stereo| c0=0*c0 | c1=c1"
-
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
27.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At end of filtering it displays track_gain and track_peak.
-
-
-
27.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
27.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-or equal to a noise tolerance value for a duration greater or equal to the
-minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
27.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg to detect silence with 0.0001 noise
-tolerance in silence.mp3:
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
27.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from beginning of audio
-the start_periods will be 1 but it can be increased to higher
-values to trim all audio up to specific count of non-silence periods.
-Default value is 0.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0 may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-This is useful, for example, if you want to remove long pauses between words
-but do not want to remove the pauses completely. Default value is 0.
-
-
-
-
-
-
27.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
-
-
27.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
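-
-For example, an illustrative 3 dB cut of the frequencies above roughly 4 kHz:
-
-
treble=g=-3:f=4000
-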
-
-
27.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
27.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
27.42.2 Examples# TOC
-
-
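-
- Halve the input volume (an illustrative value, using only the volume
-option described above):
-
-
volume=0.5
-
-
- Double the input volume and process in double precision (again an
-illustrative combination of the documented options):
-
-
volume=volume=2.0:precision=double
-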
-
-
-
27.43 volumedetect# TOC
-
-
-Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the input stream end is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
27.43.1 Examples# TOC
-
-
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
28 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
28.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
28.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
28.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
28.2.1 Examples# TOC
-
-
- Generate silence:
-
-
aevalsrc=0
-
-
- Generate a sin signal with frequency of 440 Hz, set sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
-
-
-
-
28.3 anullsrc# TOC
-
-
-The null audio source, returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
28.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
-
anullsrc=r=48000:cl=4
-
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
-
-
28.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal
. See also the list_voices option.
-
-
-
-
-
28.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite and
-the lavfi device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
28.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
28.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
29 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
29.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
29.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
30 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
30.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
30.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
30.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto
.
-
-
-
-
-
30.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16.
-
-
-
-
-
30.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
30.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32.
-
-
-
-
-
-
30.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
-The blend filter takes two input streams and outputs one
-stream, the first input is the "top" layer and second input is
-"bottom" layer. Output terminates when shortest input terminates.
-
-
-The tblend (time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode. Default value is normal.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
30.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
30.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2
for the
-luma and alpha planes, and of min(cw,ch)/2
for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
30.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
-
boxblur=2:1:cr=0:ar=0
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
30.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
30.9.1 Examples# TOC
-
-
- Visualizes multi-directionals MVs from P and B-Frames using ffplay
:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
30.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]. Defaults are 0.
-
-
-
-
-
30.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
-
colorbalance=rs=.3
-
-
-
30.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]. Defaults are 0.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]. Defaults are 1.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]. Defaults are 0.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]. Defaults are 1.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
30.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
30.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
red = red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1 for rr, and 0 for rg, rb and ra.
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1 for gg, and 0 for gr, gb and ga.
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1 for bb, and 0 for br, bg and ba.
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1 for aa, and 0 for ar, ag and ab.
-
-Allowed ranges for options are [-2.0, 2.0].
-
-
-
-
-
30.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
30.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
30.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
30.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1, forces the output display aspect ratio
-to be the same as that of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
30.15.1 Examples# TOC
-
-
-
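For illustration, a couple of possible invocations built from the options described above:
-
- Crop a centered 100x100 area (x and y default to centering the crop):
-
-
crop=w=100:h=100
-
- Crop a central area whose size is 2/3 of the input video:
-
-
crop=w=2/3*in_w:h=2/3*in_h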
-
-
30.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
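As a usage sketch, cropdetect is typically run once so that the suggested crop
parameters can be read from the log and then passed to the crop filter; for example:
-
-
ffmpeg -i input.mkv -vf cropdetect=limit=24:round=2 -f null -
-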
-
-
-
30.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-the vector spaces, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
To avoid some filtergraph syntax conflicts, each key point list needs to be
-defined using the following syntax: "x0/y0 x1/y1 x2/y2 ...".
-
-
-
30.17.1 Examples# TOC
-
-
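For illustration, two possible invocations:
-
- Apply the vintage preset:
-
-
curves=preset=vintage
-
- Slightly increase the middle level of blue:
-
-
curves=blue='0/0 0.5/0.58 1/1'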
-
-
-
30.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing of the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
30.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5:
-
-
dctdnoiz=4.5
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
Violent denoise using a block size of 16x16:
-
-
dctdnoiz=15:n=4
-
-
30.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
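For instance, a minimal invocation that keeps the default behaviour of dropping one
duplicated frame out of every cycle of five (typically used after inverse telecine):
-
-
decimate=cycle=5
-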
-
-
-
30.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content, then the output of pullup,dejudder
-will have a variable frame rate. It may change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
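As a sketch, dejudder is typically chained right after pullup when undoing partial
telecine, e.g.:
-
-
pullup,dejudder
-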
-
-
-
30.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
30.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
30.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area where to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
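For illustration, a possible invocation that searches the whole frame and allows up to
32 pixels of movement in each direction:
-
-
deshake=rx=32:ry=32
-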
-
-
-
30.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
30.23.1 Examples# TOC
-
-
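For illustration, two possible invocations:
-
- Draw a black box around the edge of the input image:
-
-
drawbox
-
- Draw a red box with 50% opacity, 200x60 pixels, with its top left corner at (10,20):
-
-
drawbox=x=10:y=20:w=200:h=60:color=red@0.5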
-
-
-
30.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness
, so image gets
-framed. Default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
30.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
30.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
30.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer
-each other, so you can for example specify y=x/dar
.
-
-
-
-
-
30.25.2 Text expansion# TOC
-
-
If expansion is set to strftime
,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none
, the text is printed verbatim.
-
-
If expansion is set to normal
(which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequence of the form %{...}
are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms
stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
30.25.3 Examples# TOC
-
-
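For illustration, one possible invocation (the font path is only an example and will
differ between systems):
-
-
drawtext=fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf:text='Test Text':x=100:y=50:fontsize=24:fontcolor=yellow@0.2:box=1:boxcolor=red@0.2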
-
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
30.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high .
-
-Default value for low is 20/255
, and default value for high
-is 50/255
.
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
30.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
30.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select r, g, b planes
-together with y, u, v planes at the same time.
-
-
-
-
-
30.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
30.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
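For instance, a posterize effect down to 16 distinct output colors might be obtained
with:
-
-
elbg=l=16
-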
-
-
-
30.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in
.
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
30.29.1 Examples# TOC
-
-
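For illustration, two possible invocations:
-
- Fade in the first 30 frames of video:
-
-
fade=in:0:30
-
- Fade out the last 45 frames of a 200-frame video:
-
-
fade=out:155:45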
-
-
-
30.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0
or
-top
) or the bottom field (if the value is 1
or
-bottom
).
-
-
-
-
-
30.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch
needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch
will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch
can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on
-which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch
and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parenthesis at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0
could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1
.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 defaults to 0
.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0]
.
-
-Default value is 12.0
.
-
-
-combmatch
-When combatch is not none , fieldmatch
will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1
(every pixel will be detected as combed) to 255
(no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12]
.
-
-Default value is 9
.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0
and maximum is blocky x blockx
(at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80
.
-
-
-
-
-
30.31.1 p/c/n/u/b meaning# TOC
-
-
-
30.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch
is configured to run a matching from bottom
-(field =bottom ), this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
30.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, a
-’x’ is placed above and below each matched fields.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
30.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
30.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
30.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
30.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
30.34.1 Examples# TOC
-
-
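For illustration:
-
- Convert the input video to the yuv420p format:
-
-
format=pix_fmts=yuv420p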
-
-
-
30.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
30.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
-
fps=fps=25
-
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
30.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
30.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select one frame after every step frames.
-Allowed values are positive integers higher than 0. Default value is 1.
-
-
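For instance, to keep one frame out of every five:
-
-
framestep=5
-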
-
-
-
30.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH.
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive, or by a color description as specified in the "Color"
-section in the ffmpeg-utils manual), a position (specified as X /Y , where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
30.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
30.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0
− PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames have often larger QP. Default is
-0
(not enabled).
-
-
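For instance, a possible invocation forcing a constant quantizer and a slightly
stronger smoothing:
-
-
fspp=quality=5:qp=2:strength=3
-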
-
-
-
-
30.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified, it will evaluate to the opaque value.
-If neither chrominance expression is specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
30.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
-
geq=p(W-X\,Y)
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
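-
-The command for the horizontal-flip example above was lost in this copy; a
-minimal sketch (assuming the luma expression is reused for the other planes,
-as described above) is:
-
geq=p(W-X\,Y)
-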
-
-
-
30.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
30.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and radius of 8:
-
-
- Specify the radius, omitting the strength (which will fall back to the default
-value):
-
-
-
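-
-Sketches of the commands for the two examples above, derived from the flat
-string syntax and option names described earlier (not copied from the original
-page):
-
gradfun=3.5:8
gradfun=radius=8
-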
-
-
-
30.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0 disables the filter after the last frame of the CLUT is reached.
-Default is 1.
-
-
-
-
haldclut also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
30.42.1 Workflow examples# TOC
-
-
-
30.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (duration of
-clut.nut ), then the last picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot stream.
-
-
-
30.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a square image of Level*Level*Level by
-Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc=8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
30.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
30.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none, weak or
-strong. It defaults to none.
-
-
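-
-As an illustration (a hedged sketch using the options above, not taken from
-the original page), a mild equalization could be applied with:
-
ffmpeg -i input.mkv -vf "histeq=strength=0.1:antibanding=weak" output.mkv
-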
-
-
-
30.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar to color, but actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform. Can be either row or column.
-Default is row.
-
-
-waveform_mirror
-Set mirroring mode for waveform. 0 means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row mode and at the top for column mode. Default is
-0 (unmirrored).
-
-
-display_mode
-Set display mode for waveform and levels.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels. Can be either linear or logarithmic.
-Default is linear.
-
-
-
-
-
30.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
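-
-A hedged sketch (not from the original page) showing the waveform mode with
-the options described above:
-
ffplay -i input -vf histogram=mode=waveform:waveform_mode=column
-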
-
-
-
30.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
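-
-For example (a sketch, not from the original page), stronger spatial luma
-denoising can be requested while leaving the other parameters at their
-derived defaults:
-
ffmpeg -i input.mp4 -vf "hqdn3d=luma_spatial=8" output.mp4
-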
-
-
30.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for hq2x, 3 for
-hq3x and 4 for hq4x.
-Default is 3.
-
-
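-
-A minimal sketch (assuming typical pixel-art upscaling; the file names are
-only illustrative):
-
ffmpeg -i sprite.png -vf "hqx=n=4" sprite_4x.png
-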
-
-
-
30.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
30.48.1 Examples# TOC
-
-
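-
-The original examples were lost in this copy; hedged sketches based on the
-options and constants above:
-
hue=h=90:s=1
hue=H=PI/2:s=sin(2*PI*t)+1
-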
-
-
-
30.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
30.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect if the input frames are interlaced, progressive,
-top field first or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-if the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
-
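-
-A hedged usage sketch: run detection over a portion of the input and read the
-per-frame metadata and summary from the log (the file name is illustrative):
-
ffmpeg -i input.ts -vf idet -frames:v 500 -an -f null -
-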
-
-
30.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced images fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0.
-
-
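-
-For instance (a sketch based on the modes above), deinterleave the fields,
-filter them as one image, then interleave them again:
-
il=l=d:c=d,boxblur=2:1,il=l=i:c=i
-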
-
-
-
30.51 interlace# TOC
-
-
Simple interlacing filter from progressive contents. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
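-
-A minimal sketch (assuming 50p input that should become 25i; codec choice is
-only illustrative):
-
ffmpeg -i input50p.mkv -vf "interlace=scan=tff:lowpass=1" -c:v mpeg2video output25i.mkv
-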
-
-
30.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
30.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
-
- Paint processed pixels in white:
-
-
-
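-
-Sketches of the commands for the last two examples above, reconstructed from
-the option names (not copied from the original page):
-
kerndeint=sharp=1
kerndeint=map=1
-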
-
-
30.53 lenscorrection# TOC
-
-
Correct radial lens distortion.
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
30.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
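-
-As an illustration (a hedged sketch with made-up coefficients), a mild
-barrel-distortion correction centered on the frame could look like:
-
ffmpeg -i input.mp4 -vf "lenscorrection=cx=0.5:cy=0.5:k1=-0.2:k2=-0.02" output.mp4
-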
-
-
30.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
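-
-A minimal sketch (assuming a LUT file named grade.cube in the Iridas format
-listed above):
-
ffmpeg -i input.mov -vf "lut3d=file=grade.cube:interp=tetrahedral" output.mov
-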
-
-
-
30.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
30.55.1 Examples# TOC
-
-
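-
-The original examples were lost in this copy; hedged sketches using the
-constants and functions documented above:
-
lutyuv="y=negval:u=negval:v=negval"
lutyuv=y=gammaval(0.5)
-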
-
-
-
30.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merges selected input
-planes to the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mappings are
-similar: ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p.
-
-
-
-
-
30.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
30.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
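-
-A hedged usage sketch (mcdeint needs one field per frame, hence the preceding
-yadif=1):
-
ffmpeg -i interlaced.mkv -vf "yadif=1,mcdeint=mode=medium:parity=tff" output.mkv
-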
-
-
30.58 mp# TOC
-
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-of the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
30.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
-
-
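-
-The command for the example above was lost in this copy; a sketch using the
-wrapped eq2 filter (parameter order as in the MPlayer manual, values only
-illustrative) could be:
-
mp=eq2=1.0:2:0.5
-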
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
30.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previous sequentially dropped frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
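-
-A hedged sketch: drop near-duplicate frames and let the output timestamps
-reflect the drops (assuming variable frame rate output is acceptable):
-
ffmpeg -i input.mp4 -vf mpdecimate -vsync vfr output.mp4
-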
-
-
-
30.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer in input; if non-zero it negates the
-alpha component (if available). The default value in input is 0.
-
-
-
30.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-pix_fmts=yuv420p|monow|rgb24.
-
-
-
-
-
-
30.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
30.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457
.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for specific pixel component or all pixel components in case
-all_strength . Default value is 0
. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags or set flags for all components if all_flags .
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
30.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
30.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
30.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
30.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate
.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file is assumed instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
30.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode
.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
30.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth
.
-
-
-
30.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them begin in the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together more overlays but you should test the
-efficiency of such approach.
-
-
-
30.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
30.65.2 Examples# TOC
-
-
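-
-The original examples were lost in this copy; hedged sketches using the
-variables and options documented above (input file names are illustrative):
-
ffmpeg -i main.mp4 -i logo.png -filter_complex "overlay=main_w-overlay_w-10:main_h-overlay_h-10" output.mp4
ffmpeg -i main.mp4 -i overlay.mp4 -filter_complex "[0][1]overlay=eof_action=pass" output.mp4
-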
-
-
-
30.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
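-
-For example (a sketch, not from the original page), heavier luma denoising
-with a deeper wavelet decomposition:
-
ffmpeg -i noisy.mkv -vf "owdenoise=depth=10:ls=25" denoised.mkv
-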
-
-
-
30.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
30.67.1 Examples# TOC
-
-
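-
-The original examples were lost in this copy; hedged sketches using the
-constants documented above (the second pads the frame to a square):
-
pad=w=iw+100:h=ih+100:x=50:y=50:color=violet
pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2"
-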
-
-
-
30.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged.
-If the sense option is set to source, then the specified points will be sent
-to the corners of the destination. If the sense option is set to destination,
-then the corners of the source will be sent to the specified coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
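-
-As an illustration (a hedged sketch with made-up corner coordinates), pull the
-top corners inward by 100 pixels:
-
ffmpeg -i input.mp4 -vf "perspective=x0=100:y0=0:x1=W-100:y1=0:x2=0:y2=H:x3=W:y3=H:sense=source" output.mp4
-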
-
-
-
30.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
30.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
30.71 pp# TOC
-
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255
.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1)
filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1)
filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1)
filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a
)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a
)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
-
-
-
-
-
30.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
-
- Apply default filters without brightness/contrast correction:
-
-
- Apply default filters and temporal denoiser:
-
-
pp=default/tmpnoise|1|2|3
-
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
-
-
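-
-Sketches of the commands for the examples above whose code was lost in this
-copy, built from the subfilter names listed earlier:
-
pp=hb/vb/dr/al
pp=de/-al
pp=hb|y/vb|a
-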
-
-
30.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp = 6 with 7 point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
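-
-A minimal sketch (assuming the stream carries usable QP information):
-
ffmpeg -i input.mp4 -vf "pp7=mode=soft" output.mp4
-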
-
-
-
30.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes two input videos; the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file printed if stats_file is selected contains a sequence of
-key/value pairs of the form key :value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
30.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup, use fps=24000/1001 if the input frame rate is 29.97fps,
-fps=24 for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-the filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0
.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use chroma plane instead of the default luma plane
-for doing filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
30.75 qp# TOC
-
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
30.75.1 Examples# TOC
-
-
- Some equation like:
-
-
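-
-The expression for the example above was lost in this copy; a sketch
-consistent with the constants documented above could be:
-
qp=2+2*sin(PI*qp)
-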
-
-
-
30.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
-
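-
-A minimal sketch (assuming a mask image named logo_mask.png of the same size
-as the video stream):
-
ffmpeg -i input.mp4 -vf "removelogo=filename=logo_mask.png" output.mp4
-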
-
30.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
30.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
-
- Rotate the input by PI/6 radians counter-clockwise:
-
-
- Rotate the input by 45 degrees clockwise:
-
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
30.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
30.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
-
30.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
-The scale filter forces the output display aspect ratio to be the same
-as that of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
30.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the corresponding input dimension is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used.
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is different from specifying -1 for w
-or h; you still need to specify the output resolution for this option
-to take effect.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar.
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" ohsub is 2 and ovsub is 1.
-
-
-
-
-
30.79.2 Examples# TOC
-
-
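-A few illustrative invocations (sketches based on the options and constants
-described above):
-
-scale=w=iw/2:h=ih/2
-scale=w=1280:h=-2
-scale=w=1280:h=720:force_original_aspect_ratio=decrease
-
-The first halves both dimensions, the second fits the width to 1280 while
-preserving the aspect ratio and forcing an even height, and the third limits
-the output to 1280x720 while keeping the original aspect ratio.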
-
-
-
30.80 separatefields# TOC
-
-
-The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half-height clip
-with twice the frame rate and twice the frame count.
-
-
-This filter uses the field-dominance information in the frame to decide
-which of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields filter.
-
-
-
30.81 setdar, setsar# TOC
-
-
The setdar
filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar
filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar only), sar (setsar only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num:den, where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, the value "0" is assumed.
-In case the form "num:den" is used, the : character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
30.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-
-
- To change the sample aspect ratio to 10:11, specify:
-
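-setsar=sar=10/11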
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
30.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
-The setfield filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder or yadif).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
30.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType
enum and of
-the av_get_picture_type_char
function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
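-For example, the reported values can be inspected by running the filter and
-discarding the output (a usage sketch):
-
-ffmpeg -i INPUT -vf showinfo -f null -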
-
30.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
30.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specify an additional form of image analysis.
-out output video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outlier pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition. Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
30.85.1 Examples# TOC
-
-
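-For example, to highlight pixels that fall outside of broadcast range in red
-(a sketch using only the out and color options documented above):
-
-signalstats=out=brng:color=red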
-
-
-
30.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter the whole image,
-a value in [0,30] will filter flat areas and a value
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter the whole image,
-a value in [0,30] will filter flat areas and a value
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
-If a chroma option is not explicitly set, the corresponding luma value
-is used.
-
-
-
30.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
-The filter accepts the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
30.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
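-stereo3d=sbsl:aybd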
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
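-stereo3d=abl:sbsr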
-
-
-
-
30.88 spp# TOC
-
-
-Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or - in the case of quality level 6 - all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0, the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1. Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0 (not enabled).
-
-
-
-
-
30.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass
. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
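-subtitles=sub.srt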
-
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
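-subtitles=video.mkv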
-
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
30.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
30.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
30.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-The default value is top.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
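-For example, to apply the classic NTSC 2:3 pulldown (a sketch based on the
-options above):
-
-telecine=first_field=top:pattern=23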
-
-
30.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100.
-
-
-
-
-Since the filter keeps track of the whole frame sequence, a bigger n
-value will result in higher memory usage, so a high value is not recommended.
-
-
-
30.93.1 Examples# TOC
-
-
- Extract one picture every 50 frames:
-
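-thumbnail=50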
-
- Complete example of a thumbnail creation with ffmpeg
:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
30.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w x h. The default value is 0, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
30.94.1 Examples# TOC
-
-
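-For example, to lay out frames on a 3x2 grid with a 7 pixel inner padding and
-a 2 pixel outer margin (a sketch based on the options above):
-
-tile=layout=3x2:padding=7:margin=2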
-
-
-
30.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
-
-
-
30.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated; the passthrough option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none
.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
-
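-transpose=1:portrait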
-
-
30.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
-
-
-
-
-
30.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
30.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
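-unsharp=7:7:-2:7:7:-2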
-
-
-
-
30.99 uspp# TOC
-
-
-Apply an ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or - in the case of quality level 8 - all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
-
-
30.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
30.100.1 Examples# TOC
-
-
- Use default values:
-
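-vidstabdetect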
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
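-vidstabdetect=show=1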
-
- Analyze a video with medium shakiness using ffmpeg
:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
30.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
-
30.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smoothen the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0. Default value is 0.
-
-Use also the tripod option of vidstabdetect.
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
30.101.2 Examples# TOC
-
-
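-For example, a typical second pass followed by sharpening, as recommended
-above (a sketch; the unsharp values are illustrative):
-
-ffmpeg -i input.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 stabilized.mpeg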
-
-
-
30.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
30.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2]
range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2"
and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1
.
-
-
-
-
-
30.103.1 Expressions# TOC
-
-
The alpha , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
30.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
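-vignette=PI/4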
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
30.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
-There are two sets of filter coefficients, so-called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following values:
-
-
-‘all ’
-Deinterlace all frames,
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
-
-
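-For example, to deinterlace only frames marked as interlaced, using the simple
-coefficient set (a sketch based on the options above):
-
-w3fdif=filter=simple:deint=interlaced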
-
30.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for
-3xBR and 4 for 4xBR.
-Default is 3.
-
-
-
-
-
30.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame
, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field
, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame
.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto
.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all.
-
-
-
-
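-For example, to output one frame per field, only for frames marked as
-interlaced (a sketch based on the options above):
-
-yadif=mode=send_field:parity=auto:deint=interlaced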
-
30.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets for how many frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of last output frame of previous input frame or 0 when there was
-not yet such frame (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-number of output frames created for previous input frame
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
30.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
31 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
31.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
-This source is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h.
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
31.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
-The initial state of the cellular automaton can be defined through the
-filename and pattern options. If such options are
-not specified, an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
31.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
31.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
-
-
31.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
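-mptestsrc=t=dc_luma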
-
will generate a "dc_luma" test pattern.
-
-
-
31.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name to the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
31.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
-At each iteration the grid evolves according to the adopted rule,
-which specifies the number of alive neighbor cells that will make a
-cell stay alive or be born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS/BNB",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bit integer. The 9
-high-order bits are used to encode the next cell state if it is alive
-for each number of alive neighbor cells, the low-order bits specify
-the rule for "birthing" new cells. Higher-order bits encode a
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9 specifies a stay-alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 alive neighbor
-cells, and will give birth to a new cell if there are three alive
-cells around a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
31.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
life=rule=S14/B34:s=200x200
-
- Full example with slow death effect (mold) using ffplay:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
31.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
The color source provides a uniformly colored input.
-
-
The haldclutsrc
source provides an identity Hald CLUT. See also
-haldclut filter.
-
-
The nullsrc
source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
The rgbtestsrc
source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
The smptebars
source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
The smptehdbars
source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
The testsrc
source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as an identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc
can be used. The
-following command generates noise in the luminance plane by employing
-the geq
filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
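-Similarly, an identity Hald CLUT can be written out as a single image;
-the level and output file name below are only an example:
-
-
ffmpeg -f lavfi -i haldclutsrc=level=8 -frames:v 1 clut.png
-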
-
-
31.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
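-For example, combined with the sendcmd filter described below, the fill
-color could be switched at a chosen time (the timing and colors here are
-arbitrary):
-
-
ffplay -f lavfi "color=c=red:s=qcif:r=10,sendcmd=c='3.0 color c blue'"
-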
-
-
32 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
32.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
32.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
33 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
33.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
-The filter is used to measure the difference between channels of a stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight (or nearly straight) horizontal line appears, this
-indicates that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
-Allowed range is [0, 255]
.
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
-Allowed range is [0, 255]
.
-
-
-zoom
-Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
-
-
-
-
-
33.1.1 Examples# TOC
-
-
- Complete example using ffplay:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
33.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
33.2.1 Examples# TOC
-
-
-
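-A typical invocation concatenates three files, each with one video and one
-audio stream (the file names are placeholders):
-
-
ffmpeg -i part1.mkv -i part2.mkv -i part3.mkv -filter_complex \
 '[0:v][0:a][1:v][1:a][2:v][2:a] concat=n=3:v=1:a=1 [v][a]' \
 -map '[v]' -map '[a]' output.mkv
-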
-
-
33.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M
), Short-term loudness (S
),
-Integrated loudness (I
) and Loudness Range (LRA
).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is 0
.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9
. Common values are 9
and
-18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value between this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1
, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
.
-
-Default is 0
.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the higher sample value. It logs a message
-for sample-peak (identified by SPK
).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak.
-(identified by TPK
) and true-peak per frame (identified by FTPK
).
-This mode requires a build with libswresample
.
-
-
-
-
-
-
-
-
33.3.1 Examples# TOC
-
-
- Real-time graph using ffplay, with an EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
33.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave works with video inputs, ainterleave with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
-Input streams must have well defined, monotonically increasing frame
-timestamps.
-
-
In order to submit one frame to output, these filters need to enqueue
-at least one frame for each input, so they cannot work in case one
-input is not yet terminated and will not receive incoming frames.
-
-
-For example consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to output until that input sends an end-of-stream signal.
-
-
-Also, depending on input synchronization, the filters will drop
-frames if one input receives more frames than the other ones and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
33.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
33.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
These filters are mainly aimed at developers to test direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode, must be an integer included between
-0
and UINT32_MAX
. If not specified, or if explicitly set to
--1
, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
-
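-As an illustration, frames can be made randomly read-only ahead of a filter
-under test (the seed and the boxblur parameters are arbitrary):
-
-
ffmpeg -i INPUT -vf "perms=mode=random:seed=42, boxblur=2:2" OUTPUT
-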
-
33.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1
, assuming that the input index starts from 0.
-
-For example a value of 1.2 corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
33.6.1 Examples# TOC
-
-
-
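- For instance, to pass only I-frames (the comma inside the expression must
-be escaped):
-
-
select='eq(pict_type\,I)'
-
- Or to pass only one frame every two:
-
-
select='not(mod(n\,2))'
-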
-
-
33.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd
must be inserted between two video filters,
-asendcmd
must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
33.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
-
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
-The current frame time is considered within the specified interval if
-it is included in the interval [START, END), that is when
-the time is greater than or equal to START and less than
-END.
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter]
is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespaces, or
-sequences of characters starting with #
until the end of line,
-are ignored and can be used to annotate comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
33.7.2 Examples# TOC
-
-
-
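- For instance, to change the audio tempo at second 4, assuming the atempo
-filter accepts the tempo command:
-
-
asendcmd=c='4.0 atempo tempo 1.5',atempo
-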
-
-
33.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts works on video frames, asetpts on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
33.8.1 Examples# TOC
-
-
- Start counting PTS from zero
-
setpts=PTS-STARTPTS
-
- Apply fast motion effect:
-
setpts=0.5*PTS
-
- Apply slow motion effect:
-
setpts=2.0*PTS
-
- Set fixed rate of 25 frames per second:
-
setpts=N/(25*TB)
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
-
- Apply an offset of 10 seconds to the input PTS:
-
setpts=PTS+10/TB
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
-
- Generate timestamps by counting samples:
-
asetpts=N/SR/TB
-
-
-
-
-
33.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
33.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
-
settb=expr=1/25
-
- Set the timebase to 1/10:
-
settb=expr=0.1
-
- Set the timebase to 1001/1000:
-
settb=1+0.001
-
- Set the timebase to 2*intb:
-
settb=2*intb
-
- Set the default timebase value:
-
settb=AVTB
-
-
-
-
-
33.10 showcqt# TOC
-
Convert input audio to a video output representing
-frequency spectrum logarithmically (using constant Q transform with
-Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16
.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc)
.
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies, there is a trade-off between
-accuracy in the time domain and the frequency domain. If timeclamp is lower,
-events in the time domain are represented more accurately (such as a fast bass drum),
-otherwise events in the frequency domain are represented more accurately
-(such as a bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, transform is
-more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
-Default value is 1.0
.
-
-
-gamma
-Specify gamma. Lower gamma makes the spectrum more contrasted, higher gamma
-gives the spectrum more range. Acceptable value is [1.0, 7.0].
-Default value is 3.0
.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify font color expression. This is arithmetic expression that should return
-integer value 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25
.
-
-
-count
-Specify the number of transforms per frame, so there are fps*count transforms
-per second. Note that the audio data rate must be divisible by fps*count.
-Default value is 6
.
-
-
-
-
-
-
33.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
33.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512
.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace
.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set saturation modifier for displayed colors. Negative values provide
-alternative color scheme. 0
is no saturation at all.
-Saturation must be in [-10.0, 10.0] range.
-Default value is 1
.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann
.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
33.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
-
-
-
33.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point
.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
33.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
-
-
-
33.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit works with audio input, split with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
33.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
-
-
-
33.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq and azmq work as pass-through filters. zmq
-must be inserted between two video filters, azmq between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq and azmq filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
33.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
34 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
34.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
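-For example, to feed the audio of a file straight into a filtergraph with
-ffplay (the file name is a placeholder):
-
-
ffplay -f lavfi 'amovie=input.mp3'
-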
-
34.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod
, so the numerical value may be suffixed by an IS
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
34.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
-
-
-
-
-
35 See Also# TOC
-
-
ffprobe ,
-ffmpeg , ffplay , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
36 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/ffprobe.html b/Externals/ffmpeg/dev/doc/ffprobe.html
deleted file mode 100644
index 1e10bf8cf5..0000000000
--- a/Externals/ffmpeg/dev/doc/ffprobe.html
+++ /dev/null
@@ -1,1113 +0,0 @@
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffprobe [options ] [input_file ]
-
-
-
2 Description# TOC
-
-
ffprobe gathers information from multimedia streams and prints it in
-human- and machine-readable fashion.
-
-
For example it can be used to check the format of the container used
-by a multimedia stream and the format and type of each media stream
-contained in it.
-
-
If a filename is specified in input, ffprobe will try to open and
-probe the file content. If the file cannot be opened or recognized as
-a multimedia file, a positive exit code is returned.
-
-
ffprobe may be employed both as a standalone application or in
-combination with a textual filter, which may perform more
-sophisticated processing, e.g. statistical processing or plotting.
-
-
Options are used to list some of the formats supported by ffprobe or
-for specifying which information to display, and for setting how
-ffprobe will show it.
-
-
ffprobe output is designed to be easily parsable by a textual filter,
-and consists of one or more sections of a form defined by the selected
-writer, which is specified by the print_format option.
-
-
Sections may contain other nested sections, and are identified by a
-name (which may be shared by other sections), and a unique
-name. See the output of sections.
-
-
Metadata tags stored in the container or in the streams are recognized
-and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
-section.
-
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of the following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr. If coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--f format
-Force format to use.
-
-
--unit
-Show the unit of the displayed values.
-
-
--prefix
-Use SI prefixes for the displayed values.
-Unless the "-byte_binary_prefix" option is used all the prefixes
-are decimal.
-
-
--byte_binary_prefix
-Force the use of binary prefixes for byte values.
-
-
--sexagesimal
-Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
-
-
--pretty
-Prettify the format of the displayed values, it corresponds to the
-options "-unit -prefix -byte_binary_prefix -sexagesimal".
-
-
--of, -print_format writer_name [=writer_options ]
-Set the output printing format.
-
-writer_name specifies the name of the writer, and
-writer_options specifies the options to be passed to the writer.
-
-For example for printing the output in JSON format, specify:
-
-print_format json
-
-For more details on the available output printing formats, see the
-Writers section below.
-
-
--sections
-Print sections structure and section information, and exit. The output
-is not meant to be parsed by a machine.
-
-
--select_streams stream_specifier
-Select only the streams specified by stream_specifier . This
-option affects only the options related to streams
-(e.g. show_streams
, show_packets
, etc.).
-
-For example to show only audio streams, you can use the command:
-
-
ffprobe -show_streams -select_streams a INPUT
-
-
-To show only video packets belonging to the video stream with index 1:
-
-
ffprobe -show_packets -select_streams v:1 INPUT
-
-
-
--show_data
-Show payload data, as a hexadecimal and ASCII dump. Coupled with
--show_packets , it will dump the packets’ data. Coupled with
--show_streams , it will dump the codec extradata.
-
-The dump is printed as the "data" field. It may contain newlines.
-
-
--show_data_hash algorithm
-Show a hash of payload data, for packets with -show_packets and for
-codec extradata with -show_streams .
-
-
--show_error
-Show information about the error found when trying to probe the input.
-
-The error information is printed within a section with name "ERROR".
-
-
--show_format
-Show information about the container format of the input multimedia
-stream.
-
-All the container format information is printed within a section with
-name "FORMAT".
-
-
--show_format_entry name
-Like -show_format , but only prints the specified entry of the
-container format information, rather than all. This option may be given more
-than once, then all specified entries will be shown.
-
-This option is deprecated, use show_entries
instead.
-
-
--show_entries section_entries
-Set list of entries to show.
-
-Entries are specified according to the following
-syntax. section_entries contains a list of section entries
-separated by :
. Each section entry is composed by a section
-name (or unique name), optionally followed by a list of entries local
-to that section, separated by ,
.
-
-If section name is specified but is followed by no =
, all
-entries are printed to output, together with all the contained
-sections. Otherwise only the entries specified in the local section
-entries list are printed. In particular, if =
is specified but
-the list of local entries is empty, then no entries will be shown for
-that section.
-
-Note that the order of specification of the local section entries is
-not honored in the output, and the usual display order will be
-retained.
-
-The formal syntax is given by:
-
-
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME [,LOCAL_SECTION_ENTRIES ]
-SECTION_ENTRY ::= SECTION_NAME [=[LOCAL_SECTION_ENTRIES ]]
-SECTION_ENTRIES ::= SECTION_ENTRY [:SECTION_ENTRIES ]
-
-
-For example, to show only the index and type of each stream, and the PTS
-time, duration time, and stream index of the packets, you can specify
-the argument:
-
-
packet=pts_time,duration_time,stream_index : stream=index,codec_type
-
-
-To show all the entries in the section "format", but only the codec
-type in the section "stream", specify the argument:
-
-
format : stream=codec_type
-
-
-To show all the tags in the stream and format sections:
-
-
stream_tags : format_tags
-
-
-To show only the title tag (if available) in the stream
-sections:
-
stream_tags=title
-
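-Putting these together, a complete invocation might look like this (the
-input name is a placeholder):
-
-
ffprobe -v error -of json -show_entries format=duration,bit_rate:stream=index,codec_type INPUT
-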
-
--show_packets
-Show information about each packet contained in the input multimedia
-stream.
-
-The information for each single packet is printed within a dedicated
-section with name "PACKET".
-
-
--show_frames
-Show information about each frame and subtitle contained in the input
-multimedia stream.
-
-The information for each single frame is printed within a dedicated
-section with name "FRAME" or "SUBTITLE".
-
-
--show_streams
-Show information about each media stream contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "STREAM".
-
-
--show_programs
-Show information about programs and their streams contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "PROGRAM_STREAM".
-
-
--show_chapters
-Show information about chapters stored in the format.
-
-Each chapter is printed within a dedicated section with name "CHAPTER".
-
-
--count_frames
-Count the number of frames per stream and report it in the
-corresponding stream section.
-
-
--count_packets
-Count the number of packets per stream and report it in the
-corresponding stream section.
-
-
--read_intervals read_intervals
-
-Read only the specified intervals. read_intervals must be a
-sequence of interval specifications separated by ",".
-ffprobe will seek to the interval starting point, and will
-continue reading from there.
-
-Each interval is specified by two optional parts, separated by "%".
-
-The first part specifies the interval start position. It is
-interpreted as an absolute position, or as a relative offset from the
-current position if it is preceded by the "+" character. If this first
-part is not specified, no seeking will be performed when reading this
-interval.
-
-The second part specifies the interval end position. It is interpreted
-as an absolute position, or as a relative offset from the current
-position if it is preceded by the "+" character. If the offset
-specification starts with "#", it is interpreted as the number of
-packets to read (not including the flushing packets) from the interval
-start. If no second part is specified, the program will read until the
-end of the input.
-
-Note that seeking is not accurate, thus the actual interval start
-point may be different from the specified position. Also, when an
-interval duration is specified, the absolute end time will be computed
-by adding the duration to the interval start point found by seeking
-the file, rather than to the specified start value.
-
-The formal syntax is given by:
-
-
INTERVAL ::= [START |+START_OFFSET ][%[END |+END_OFFSET ]]
-INTERVALS ::= INTERVAL [,INTERVALS ]
-
-
-A few examples follow.
-
- Seek to time 10, read packets until 20 seconds after the found seek
-point, then seek to position 01:30 (1 minute and thirty
-seconds) and read packets until position 01:45.
-
10%+20,01:30%01:45
-
- Read only 42 packets after seeking to position 01:23:
-
01:23%+#42
-
- Read only the first 20 seconds from the start:
-
%+20
-
- Read from the start until position 02:30:
-
%02:30
-
-
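-As a usage sketch (INPUT is a placeholder), reading only the first 20
-seconds of a file while printing packet information could look like:
-
ffprobe -read_intervals %+20 -show_packets INPUT
-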
-
-
--show_private_data, -private
-Show private data, that is data depending on the format of the
-particular shown element.
-This option is enabled by default, but you may need to disable it
-for specific uses, for example when creating XSD-compliant XML output.
-
-
--show_program_version
-Show information related to program version.
-
-Version information is printed within a section with name
-"PROGRAM_VERSION".
-
-
--show_library_versions
-Show information related to library versions.
-
-Version information for each library is printed within a section with
-name "LIBRARY_VERSION".
-
-
--show_versions
-Show information related to program and library versions. This is the
-equivalent of setting both -show_program_version and
--show_library_versions options.
-
-
--show_pixel_formats
-Show information about all pixel formats supported by FFmpeg.
-
-Pixel format information for each format is printed within a section
-with name "PIXEL_FORMAT".
-
-
--bitexact
-Force bitexact output, useful to produce output which is not dependent
-on the specific build.
-
-
--i input_file
-Read input_file .
-
-
-
-
-
-
4 Writers# TOC
-
-
A writer defines the output format adopted by ffprobe, and will be
-used for printing all the parts of the output.
-
-
A writer may accept one or more arguments, which specify the options
-to adopt. The options are specified as a list of key =value
-pairs, separated by ":".
-
-
All writers support the following options:
-
-
-string_validation, sv
-Set string validation mode.
-
-The following values are accepted.
-
-‘fail ’
-The writer will fail immediately in case an invalid string (UTF-8)
-sequence or code point is found in the input. This is especially
-useful to validate input metadata.
-
-
-‘ignore ’
-Any validation error will be ignored. This will result in possibly
-broken output, especially with the json or xml writer.
-
-
-‘replace ’
-The writer will substitute invalid UTF-8 sequences or code points with
-the string specified with the string_validation_replacement .
-
-
-
-Default value is ‘replace ’.
-
-
-string_validation_replacement, svr
-Set replacement string to use in case string_validation is
-set to ‘replace ’.
-
-In case the option is not specified, the writer will assume the empty
-string, that is it will remove the invalid sequences from the input
-strings.
-
-
-
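-As a sketch of how writer options are passed (assuming the standard
--of/-print_format ffprobe option is used to select the writer, with
-INPUT as a placeholder), the json writer could be selected with string
-validation set to ignore as follows:
-
ffprobe -of json=string_validation=ignore -show_streams INPUT
-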
-
A description of the currently available writers follows.
-
-
-
4.1 default# TOC
-
Default format.
-
-
Print each section in the form:
-
-
[SECTION]
-key1=val1
-...
-keyN=valN
-[/SECTION]
-
-
-
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
-PROGRAM_STREAM section, and are prefixed by the string "TAG:".
-
-
A description of the accepted options follows.
-
-
-nokey, nk
-If set to 1, do not print the key of each field. Default value
-is 0.
-
-
-noprint_wrappers, nw
-If set to 1 specify not to print the section header and footer.
-Default value is 0.
-
-
-
-
-
4.2 compact, csv# TOC
-
Compact and CSV format.
-
-
-The csv writer is equivalent to compact, but supports
-different defaults.
-
-
Each section is printed on a single line.
-If no option is specified, the output has the form:
-
-
section|key1=val1| ... |keyN=valN
-
-
-
Metadata tags are printed in the corresponding "format" or "stream"
-section. A metadata tag key, if printed, is prefixed by the string
-"tag:".
-
-
The description of the accepted options follows.
-
-
-item_sep, s
-Specify the character to use for separating fields in the output line.
-It must be a single printable character; it is "|" by default ("," for
-the csv writer).
-
-
-nokey, nk
-If set to 1, do not print the key of each field. Its default
-value is 0 (1 for the csv writer).
-
-
-escape, e
-Set the escape mode to use; the default is "c" ("csv" for the csv
-writer).
-
-It can assume one of the following values:
-
-c
-Perform C-like escaping. Strings containing a newline (’\n’), carriage
-return (’\r’), a tab (’\t’), a form feed (’\f’), the escape
-character (’\’) or the item separator character SEP are escaped using
-C-style escaping, so that a newline is converted to the sequence "\n", a
-carriage return to "\r", ’\’ to "\\" and the separator SEP is
-converted to "\SEP".
-
-
-csv
-Perform CSV-like escaping, as described in RFC4180. Strings
-containing a newline (’\n’), a carriage return (’\r’), a double quote
-(’"’), or SEP are enclosed in double-quotes.
-
-
-none
-Perform no escaping.
-
-
-
-
-print_section, p
-Print the section name at the beginning of each line if the value is
-1, disable it with a value of 0. Default value is 1.
-
-
-
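-As a usage sketch (INPUT is a placeholder), printing just the container
-duration as a bare CSV value could be done with:
-
ffprobe -of csv=print_section=0 -show_entries format=duration INPUT
-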
-
-
-
4.3 flat# TOC
-
Flat format.
-
-
A free-form output where each line contains an explicit key=value, such as
-"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
-directly embedded in sh scripts as long as the separator character is an
-alphanumeric character or an underscore (see sep_char option).
-
-
The description of the accepted options follows.
-
-
-sep_char, s
-Separator character used to separate the chapter, the section name, IDs and
-potential tags in the printed field key.
-
-Default value is ’.’.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
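-Because the output is shell escaped, a sketch along these lines (INPUT is
-a placeholder) can pull a value straight into a shell variable; with
-sep_char set to "_" the printed keys become valid variable names:
-
eval $(ffprobe -v error -of flat=s=_ -show_entries format=duration INPUT)
-echo "$format_duration"
-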
-
-
4.4 ini# TOC
-
INI format output.
-
-
Print output in an INI based format.
-
-
The following conventions are adopted:
-
-
- all key and values are UTF-8
- ’.’ is the subgroup separator
- newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped
- ’\’ is the escape character
- ’#’ is the comment indicator
- ’=’ is the key/value separator
- ’:’ is not used but usually parsed as key/value separator
-
-
-
This writer accepts options as a list of key =value pairs,
-separated by ":".
-
-
The description of the accepted options follows.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
4.5 json# TOC
-
JSON based format.
-
-
Each section is printed using JSON notation.
-
-
The description of the accepted options follows.
-
-
-compact, c
-If set to 1 enable compact output, that is each section will be
-printed on a single line. Default value is 0.
-
-
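-A usage sketch (INPUT is a placeholder) producing compact JSON for all
-streams:
-
ffprobe -of json=compact=1 -show_streams INPUT
-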
-
-
For more information about JSON, see http://www.json.org/ .
-
-
-
4.6 xml# TOC
-
XML based format.
-
-
The XML output is described in the XML schema description file
-ffprobe.xsd installed in the FFmpeg datadir.
-
-
An updated version of the schema can be retrieved at the url
-http://www.ffmpeg.org/schema/ffprobe.xsd , which redirects to the
-latest schema committed into the FFmpeg development source code tree.
-
-
Note that the output issued will be compliant to the
-ffprobe.xsd schema only when no special global output options
-(unit , prefix , byte_binary_prefix ,
-sexagesimal etc.) are specified.
-
-
The description of the accepted options follows.
-
-
-fully_qualified, q
-If set to 1, the output will be fully qualified. Default
-value is 0.
-This is required for generating an XML file which can be validated
-through an XSD file.
-
-
-xsd_compliant, x
-If set to 1 perform more checks for ensuring that the output is XSD
-compliant. Default value is 0.
-This option automatically sets fully_qualified to 1.
-
-
-
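-As a usage sketch (INPUT is a placeholder, and xmllint is just one
-possible validator), XSD-compliant output could be produced and checked
-against the schema like this:
-
ffprobe -of xml=x=1 -show_streams INPUT > out.xml
-xmllint --noout --schema ffprobe.xsd out.xml
-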
-
For more information about the XML format, see
-http://www.w3.org/XML/ .
-
-
-
5 Timecode# TOC
-
-
ffprobe supports Timecode extraction:
-
-
- MPEG1/2 timecode is extracted from the GOP, and is available in the video
-stream details (-show_streams , see timecode ).
-
- MOV timecode is extracted from tmcd track, so is available in the tmcd
-stream metadata (-show_streams , see TAG:timecode ).
-
- DV, GXF and AVI timecodes are available in format metadata
-(-show_format , see TAG:timecode ).
-
-
-
-
-
-
6 See Also# TOC
-
-
ffprobe-all ,
-ffmpeg , ffplay , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
7 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/general.html b/Externals/ffmpeg/dev/doc/general.html
deleted file mode 100644
index df45ecabca..0000000000
--- a/Externals/ffmpeg/dev/doc/general.html
+++ /dev/null
@@ -1,986 +0,0 @@
-
-
-
-
-
-
- General Documentation
-
-
-
-
-
-
-
-
- General Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 External libraries# TOC
-
-
FFmpeg can be hooked up with a number of external libraries to add support
-for more formats. None of them are used by default; their use has to be
-explicitly requested by passing the appropriate flags to
-./configure.
-
-
-
1.1 OpenJPEG# TOC
-
-
FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to
-http://www.openjpeg.org/ to get the libraries and follow the installation
-instructions. To enable using OpenJPEG in FFmpeg, pass --enable-libopenjpeg
to
-./configure .
-
-
-
-
1.2 OpenCORE, VisualOn, and Fraunhofer libraries# TOC
-
-
-Spun off from the Google Android sources, the OpenCORE, VisualOn and Fraunhofer
-libraries provide encoders for a number of audio codecs.
-
-
-
OpenCORE and VisualOn libraries are under the Apache License 2.0
-(see http://www.apache.org/licenses/LICENSE-2.0 for details), which is
-incompatible with the LGPL version 2.1 and GPL version 2. You have to
-upgrade FFmpeg’s license to LGPL version 3 (or if you have enabled
-GPL components, GPL version 3) by passing --enable-version3 to configure in
-order to use it.
-
-
-The Fraunhofer AAC library is licensed under a license incompatible with the GPL
-and is not known to be compatible with the LGPL. Therefore, you have to pass
---enable-nonfree to configure to use it.
-
-
-
1.2.1 OpenCORE AMR# TOC
-
-
FFmpeg can make use of the OpenCORE libraries for AMR-NB
-decoding/encoding and AMR-WB decoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the libraries.
-Then pass --enable-libopencore-amrnb and/or
---enable-libopencore-amrwb to configure to enable them.
-
-
-
1.2.2 VisualOn AAC encoder library# TOC
-
-
FFmpeg can make use of the VisualOn AACenc library for AAC encoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the library.
-Then pass --enable-libvo-aacenc
to configure to enable it.
-
-
-
1.2.3 VisualOn AMR-WB encoder library# TOC
-
-
FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the library.
-Then pass --enable-libvo-amrwbenc
to configure to enable it.
-
-
-
1.2.4 Fraunhofer AAC library# TOC
-
-
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the library.
-Then pass --enable-libfdk-aac
to configure to enable it.
-
-
-
1.3 LAME# TOC
-
-
FFmpeg can make use of the LAME library for MP3 encoding.
-
-
Go to http://lame.sourceforge.net/ and follow the
-instructions for installing the library.
-Then pass --enable-libmp3lame
to configure to enable it.
-
-
-
1.4 TwoLAME# TOC
-
-
FFmpeg can make use of the TwoLAME library for MP2 encoding.
-
-
Go to http://www.twolame.org/ and follow the
-instructions for installing the library.
-Then pass --enable-libtwolame
to configure to enable it.
-
-
-
1.5 libvpx# TOC
-
-
FFmpeg can make use of the libvpx library for VP8/VP9 encoding.
-
-
Go to http://www.webmproject.org/ and follow the instructions for
-installing the library. Then pass --enable-libvpx
to configure to
-enable it.
-
-
-
1.6 libwavpack# TOC
-
-
FFmpeg can make use of the libwavpack library for WavPack encoding.
-
-
Go to http://www.wavpack.com/ and follow the instructions for
-installing the library. Then pass --enable-libwavpack
to configure to
-enable it.
-
-
-
1.7 OpenH264# TOC
-
-
FFmpeg can make use of the OpenH264 library for H.264 encoding.
-
-
Go to http://www.openh264.org/ and follow the instructions for
-installing the library. Then pass --enable-libopenh264
to configure to
-enable it.
-
-
-
1.8 x264# TOC
-
-
FFmpeg can make use of the x264 library for H.264 encoding.
-
-
Go to http://www.videolan.org/developers/x264.html and follow the
-instructions for installing the library. Then pass --enable-libx264 to
-configure to enable it.
-
-
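-As a minimal configuration sketch for such a build (libx264 is GPL, so
---enable-gpl is assumed to be required as well):
-
./configure --enable-gpl --enable-libx264
-make
-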
-
-
1.9 x265# TOC
-
-
FFmpeg can make use of the x265 library for HEVC encoding.
-
-
Go to http://x265.org/developers.html and follow the instructions
-for installing the library. Then pass --enable-libx265
to configure
-to enable it.
-
-
-
-
1.10 libilbc# TOC
-
-
iLBC is a narrowband speech codec that has been made freely available
-by Google as part of the WebRTC project. libilbc is a packaging friendly
-copy of the iLBC codec. FFmpeg can make use of the libilbc library for
-iLBC encoding and decoding.
-
-
Go to https://github.com/TimothyGu/libilbc and follow the instructions for
-installing the library. Then pass --enable-libilbc
to configure to
-enable it.
-
-
-
1.11 libzvbi# TOC
-
-
libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB
-teletext pages and DVB teletext subtitles.
-
-
Go to http://sourceforge.net/projects/zapping/ and follow the instructions for
-installing the library. Then pass --enable-libzvbi
to configure to
-enable it.
-
-
-
-
1.12 AviSynth# TOC
-
-
FFmpeg can read AviSynth scripts as input. To enable support, pass
---enable-avisynth
to configure. The correct headers are
-included in compat/avisynth/, which allows the user to enable support
-without needing to search for these headers themselves.
-
-
For Windows, supported AviSynth variants are
-AviSynth 2.5 or 2.6 for 32-bit builds and
-AviSynth+ 0.1 for 32-bit and 64-bit builds.
-
-
For Linux and OS X, the supported AviSynth variant is
-AvxSynth .
-
-
-
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
-with --enable-avisynth, and the binaries will work regardless of the
-end user having AviSynth or AvxSynth installed - they’ll only need to be
-installed to use AviSynth scripts (obviously).
-
-
-
-
2 Supported File Formats, Codecs or Features# TOC
-
-
You can use the -formats and -codecs options to have an exhaustive list.
-
-
-
2.1 File Formats# TOC
-
-
FFmpeg supports the following file formats through the libavformat
-library:
-
-
-Name Encoding Decoding Comments
-4xm X 4X Technologies format, used in some games.
-8088flex TMV X
-ACT Voice X contains G.729 audio
-Adobe Filmstrip X X
-Audio IFF (AIFF) X X
-American Laser Games MM X Multimedia format used in games like Mad Dog McCree.
-3GPP AMR X X
-Amazing Studio Packed Animation File X Multimedia format used in game Heart Of Darkness.
-Apple HTTP Live Streaming X
-Artworx Data Format X
-ADP X Audio format used on the Nintendo Gamecube.
-AFC X Audio format used on the Nintendo Gamecube.
-ASF X X
-AST X X Audio format used on the Nintendo Wii.
-AVI X X
-AviSynth X
-AVR X Audio format used on Mac.
-AVS X Multimedia format used by the Creature Shock game.
-Beam Software SIFF X Audio and video format used in some games by Beam Software.
-Bethesda Softworks VID X Used in some games from Bethesda Softworks.
-Binary text X
-Bink X Multimedia format used by many games.
-Bitmap Brothers JV X Used in Z and Z95 games.
-Brute Force & Ignorance X Used in the game Flash Traffic: City of Angels.
-BRSTM X Audio format used on the Nintendo Wii.
-BWF X X
-CRI ADX X X Audio-only format used in console video games.
-Discworld II BMV X
-Interplay C93 X Used in the game Cyberia from Interplay.
-Delphine Software International CIN X Multimedia format used by Delphine Software games.
-CD+G X Video format used by CD+G karaoke disks
-Phantom Cine X
-Commodore CDXL X Amiga CD video format
-Core Audio Format X X Apple Core Audio Format
-CRC testing format X
-Creative Voice X X Created for the Sound Blaster Pro.
-CRYO APC X Audio format used in some games by CRYO Interactive Entertainment.
-D-Cinema audio X X
-Deluxe Paint Animation X
-DFA X This format is used in Chronomaster game
-DSD Stream File (DSF) X
-DV video X X
-DXA X This format is used in the non-Windows version of the Feeble Files
- game and different game cutscenes repacked for use with ScummVM.
-Electronic Arts cdata X
-Electronic Arts Multimedia X Used in various EA games; files have extensions like WVE and UV2.
-Ensoniq Paris Audio File X
-FFM (FFserver live feed) X X
-Flash (SWF) X X
-Flash 9 (AVM2) X X Only embedded audio is decoded.
-FLI/FLC/FLX animation X .fli/.flc files
-Flash Video (FLV) X X Macromedia Flash video files
-framecrc testing format X
-FunCom ISS X Audio format used in various games from FunCom like The Longest Journey.
-G.723.1 X X
-G.729 BIT X X
-G.729 raw X
-GIF Animation X X
-GXF X X General eXchange Format SMPTE 360M, used by Thomson Grass Valley
- playout servers.
-HNM X Only version 4 supported, used in some games from Cryo Interactive
-iCEDraw File X
-ICO X X Microsoft Windows ICO
-id Quake II CIN video X
-id RoQ X X Used in Quake III, Jedi Knight 2 and other computer games.
-IEC61937 encapsulation X X
-IFF X Interchange File Format
-iLBC X X
-Interplay MVE X Format used in various Interplay computer games.
-IV8 X A format generated by IndigoVision 8000 video server.
-IVF (On2) X X A format used by libvpx
-IRCAM X X
-LATM X X
-LMLM4 X Used by Linux Media Labs MPEG-4 PCI boards
-LOAS X contains LATM multiplexed AAC audio
-LRC X X
-LVF X
-LXF X VR native stream format, used by Leitch/Harris’ video servers.
-Magic Lantern Video (MLV) X
-Matroska X X
-Matroska audio X
-FFmpeg metadata X X Metadata in text format.
-MAXIS XA X Used in Sim City 3000; file extension .xa.
-MD Studio X
-Metal Gear Solid: The Twin Snakes X
-Megalux Frame X Used by Megalux Ultimate Paint
-Mobotix .mxg X
-Monkey’s Audio X
-Motion Pixels MVI X
-MOV/QuickTime/MP4 X X 3GP, 3GP2, PSP, iPod variants supported
-MP2 X X
-MP3 X X
-MPEG-1 System X X muxed audio and video, VCD format supported
-MPEG-PS (program stream) X X also known as VOB
file, SVCD and DVD format supported
-MPEG-TS (transport stream) X X also known as DVB Transport Stream
-MPEG-4 X X MPEG-4 is a variant of QuickTime.
-Mirillis FIC video X No cursor rendering.
-MIME multipart JPEG X
-MSN TCP webcam X Used by MSN Messenger webcam streams.
-MTV X
-Musepack X
-Musepack SV8 X
-Material eXchange Format (MXF) X X SMPTE 377M, used by D-Cinema, broadcast industry.
-Material eXchange Format (MXF), D-10 Mapping X X SMPTE 386M, D-10/IMX Mapping.
-NC camera feed X NC (AVIP NC4600) camera streams
-NIST SPeech HEader REsources X
-NTT TwinVQ (VQF) X Nippon Telegraph and Telephone Corporation TwinVQ.
-Nullsoft Streaming Video X
-NuppelVideo X
-NUT X X NUT Open Container Format
-Ogg X X
-Playstation Portable PMP X
-Portable Voice Format X
-TechnoTrend PVA X Used by TechnoTrend DVB PCI boards.
-QCP X
-raw ADTS (AAC) X X
-raw AC-3 X X
-raw Chinese AVS video X X
-raw CRI ADX X X
-raw Dirac X X
-raw DNxHD X X
-raw DTS X X
-raw DTS-HD X
-raw E-AC-3 X X
-raw FLAC X X
-raw GSM X
-raw H.261 X X
-raw H.263 X X
-raw H.264 X X
-raw HEVC X X
-raw Ingenient MJPEG X
-raw MJPEG X X
-raw MLP X
-raw MPEG X
-raw MPEG-1 X
-raw MPEG-2 X
-raw MPEG-4 X X
-raw NULL X
-raw video X X
-raw id RoQ X
-raw Shorten X
-raw TAK X
-raw TrueHD X X
-raw VC-1 X X
-raw PCM A-law X X
-raw PCM mu-law X X
-raw PCM signed 8 bit X X
-raw PCM signed 16 bit big-endian X X
-raw PCM signed 16 bit little-endian X X
-raw PCM signed 24 bit big-endian X X
-raw PCM signed 24 bit little-endian X X
-raw PCM signed 32 bit big-endian X X
-raw PCM signed 32 bit little-endian X X
-raw PCM unsigned 8 bit X X
-raw PCM unsigned 16 bit big-endian X X
-raw PCM unsigned 16 bit little-endian X X
-raw PCM unsigned 24 bit big-endian X X
-raw PCM unsigned 24 bit little-endian X X
-raw PCM unsigned 32 bit big-endian X X
-raw PCM unsigned 32 bit little-endian X X
-raw PCM floating-point 32 bit big-endian X X
-raw PCM floating-point 32 bit little-endian X X
-raw PCM floating-point 64 bit big-endian X X
-raw PCM floating-point 64 bit little-endian X X
-RDT X
-REDCODE R3D X File format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio.
-RealMedia X X
-Redirector X
-RedSpark X
-Renderware TeXture Dictionary X
-RL2 X Audio and video format used in some games by Entertainment Software Partners.
-RPL/ARMovie X
-Lego Mindstorms RSO X X
-RSD X
-RTMP X X Output is performed by publishing stream to RTMP server
-RTP X X
-RTSP X X
-SAP X X
-SBG X
-SDP X
-Sega FILM/CPK X Used in many Sega Saturn console games.
-Silicon Graphics Movie X
-Sierra SOL X .sol files used in Sierra Online games.
-Sierra VMD X Used in Sierra CD-ROM games.
-Smacker X Multimedia format used by many games.
-SMJPEG X X Used in certain Loki game ports.
-Smush X Multimedia format used in some LucasArts games.
-Sony OpenMG (OMA) X X Audio format used in Sony Sonic Stage and Sony Vegas.
-Sony PlayStation STR X
-Sony Wave64 (W64) X X
-SoX native format X X
-SUN AU format X X
-SUP raw PGS subtitles X
-Text files X
-THP X Used on the Nintendo GameCube.
-Tiertex Limited SEQ X Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
-True Audio X
-VC-1 test bitstream X X
-Vivo X
-WAV X X
-WavPack X X
-WebM X X
-Windows Televison (WTV) X X
-Wing Commander III movie X Multimedia format used in Origin’s Wing Commander III computer game.
-Westwood Studios audio X Multimedia format used in Westwood Studios games.
-Westwood Studios VQA X Multimedia format used in Westwood Studios games.
-XMV X Microsoft video container used in Xbox games.
-xWMA X Microsoft audio container used by XAudio 2.
-eXtended BINary text (XBIN) X
-YUV4MPEG pipe X X
-Psygnosis YOP X
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
-
2.2 Image Formats# TOC
-
-
FFmpeg can read and write images for each frame of a video sequence. The
-following image formats are supported:
-
-
-Name Encoding Decoding Comments
-.Y.U.V X X one raw file per component
-Alias PIX X X Alias/Wavefront PIX image format
-animated GIF X X
-BMP X X Microsoft BMP image
-BRender PIX X Argonaut BRender 3D engine image format.
-DPX X X Digital Picture Exchange
-EXR X OpenEXR
-JPEG X X Progressive JPEG is not supported.
-JPEG 2000 X X
-JPEG-LS X X
-LJPEG X Lossless JPEG
-PAM X X PAM is a PNM extension with alpha support.
-PBM X X Portable BitMap image
-PCX X X PC Paintbrush
-PGM X X Portable GrayMap image
-PGMYUV X X PGM with U and V components in YUV 4:2:0
-PIC X Pictor/PC Paint
-PNG X X
-PPM X X Portable PixelMap image
-PTX X V.Flash PTX format
-SGI X X SGI RGB image format
-Sun Rasterfile X X Sun RAS image format
-TIFF X X YUV, JPEG and some extension is not supported yet.
-Truevision Targa X X Targa (.TGA) image format
-WebP E X WebP image format, encoding supported through external library libwebp
-XBM X X X BitMap image format
-XFace X X X-Face image format
-XWD X X X Window Dump image format
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
E
means that support is provided through an external library.
-
-
-
2.3 Video Codecs# TOC
-
-
-Name Encoding Decoding Comments
-4X Movie X Used in certain computer games.
-8088flex TMV X
-A64 multicolor X Creates video suitable to be played on a commodore 64 (multicolor mode).
-Amazing Studio PAF Video X
-American Laser Games MM X Used in games like Mad Dog McCree.
-AMV Video X X Used in Chinese MP3 players.
-ANSI/ASCII art X
-Apple Intermediate Codec X
-Apple MJPEG-B X
-Apple ProRes X X
-Apple QuickDraw X fourcc: qdrw
-Asus v1 X X fourcc: ASV1
-Asus v2 X X fourcc: ASV2
-ATI VCR1 X fourcc: VCR1
-ATI VCR2 X fourcc: VCR2
-Auravision Aura X
-Auravision Aura 2 X
-Autodesk Animator Flic video X
-Autodesk RLE X fourcc: AASC
-Avid 1:1 10-bit RGB Packer X X fourcc: AVrp
-AVS (Audio Video Standard) video X Video encoding used by the Creature Shock game.
-AYUV X X Microsoft uncompressed packed 4:4:4:4
-Beam Software VB X
-Bethesda VID video X Used in some games from Bethesda Softworks.
-Bink Video X
-Bitmap Brothers JV video X
-y41p Brooktree uncompressed 4:1:1 12-bit X X
-Brute Force & Ignorance X Used in the game Flash Traffic: City of Angels.
-C93 video X Codec used in Cyberia game.
-CamStudio X fourcc: CSCD
-CD+G X Video codec for CD+G karaoke disks
-CDXL X Amiga CD video codec
-Chinese AVS video E X AVS1-P2, JiZhun profile, encoding through external library libxavs
-Delphine Software International CIN video X Codec used in Delphine Software International games.
-Discworld II BMV Video X
-Canopus Lossless Codec X
-Cinepak X
-Cirrus Logic AccuPak X X fourcc: CLJR
-CPiA Video Format X
-Creative YUV (CYUV) X
-DFA X Codec used in Chronomaster game.
-Dirac E X supported through external library libschroedinger
-Deluxe Paint Animation X
-DNxHD X X aka SMPTE VC3
-Duck TrueMotion 1.0 X fourcc: DUCK
-Duck TrueMotion 2.0 X fourcc: TM20
-DV (Digital Video) X X
-Dxtory capture format X
-Feeble Files/ScummVM DXA X Codec originally used in Feeble Files game.
-Electronic Arts CMV video X Used in NHL 95 game.
-Electronic Arts Madcow video X
-Electronic Arts TGV video X
-Electronic Arts TGQ video X
-Electronic Arts TQI video X
-Escape 124 X
-Escape 130 X
-FFmpeg video codec #1 X X lossless codec (fourcc: FFV1)
-Flash Screen Video v1 X X fourcc: FSV1
-Flash Screen Video v2 X X
-Flash Video (FLV) X X Sorenson H.263 used in Flash
-Forward Uncompressed X
-Fraps X
-Go2Webinar X fourcc: G2M4
-H.261 X X
-H.263 / H.263-1996 X X
-H.263+ / H.263-1998 / H.263 version 2 X X
-H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 E X encoding supported through external library libx264 and OpenH264
-HEVC X X encoding supported through the external library libx265
-HNM version 4 X
-HuffYUV X X
-HuffYUV FFmpeg variant X X
-IBM Ultimotion X fourcc: ULTI
-id Cinematic video X Used in Quake II.
-id RoQ video X X Used in Quake III, Jedi Knight 2, other computer games.
-IFF ILBM X IFF interleaved bitmap
-IFF ByteRun1 X IFF run length encoded bitmap
-Intel H.263 X
-Intel Indeo 2 X
-Intel Indeo 3 X
-Intel Indeo 4 X
-Intel Indeo 5 X
-Interplay C93 X Used in the game Cyberia from Interplay.
-Interplay MVE video X Used in Interplay .MVE files.
-J2K X X
-Karl Morton’s video codec X Codec used in Worms games.
-Kega Game Video (KGV1) X Kega emulator screen capture codec.
-Lagarith X
-LCL (LossLess Codec Library) MSZH X
-LCL (LossLess Codec Library) ZLIB E E
-LOCO X
-LucasArts SANM/Smush X Used in LucasArts games / SMUSH animations.
-lossless MJPEG X X
-Microsoft ATC Screen X Also known as Microsoft Screen 3.
-Microsoft Expression Encoder Screen X Also known as Microsoft Titanium Screen 2.
-Microsoft RLE X
-Microsoft Screen 1 X Also known as Windows Media Video V7 Screen.
-Microsoft Screen 2 X Also known as Windows Media Video V9 Screen.
-Microsoft Video 1 X
-Mimic X Used in MSN Messenger Webcam streams.
-Miro VideoXL X fourcc: VIXL
-MJPEG (Motion JPEG) X X
-Mobotix MxPEG video X
-Motion Pixels video X
-MPEG-1 video X X
-MPEG-2 video X X
-MPEG-4 part 2 X X libxvidcore can be used alternatively for encoding.
-MPEG-4 part 2 Microsoft variant version 1 X
-MPEG-4 part 2 Microsoft variant version 2 X X
-MPEG-4 part 2 Microsoft variant version 3 X X
-Nintendo Gamecube THP video X
-NuppelVideo/RTjpeg X Video encoding used in NuppelVideo files.
-On2 VP3 X still experimental
-On2 VP5 X fourcc: VP50
-On2 VP6 X fourcc: VP60,VP61,VP62
-On2 VP7 X fourcc: VP70,VP71
-VP8 E X fourcc: VP80, encoding supported through external library libvpx
-VP9 E X encoding supported through external library libvpx
-Pinnacle TARGA CineWave YUV16 X fourcc: Y216
-Prores X fourcc: apch,apcn,apcs,apco
-Q-team QPEG X fourccs: QPEG, Q1.0, Q1.1
-QuickTime 8BPS video X
-QuickTime Animation (RLE) video X X fourcc: ’rle ’
-QuickTime Graphics (SMC) X fourcc: ’smc ’
-QuickTime video (RPZA) X fourcc: rpza
-R10K AJA Kona 10-bit RGB Codec X X
-R210 Quicktime Uncompressed RGB 10-bit X X
-Raw Video X X
-RealVideo 1.0 X X
-RealVideo 2.0 X X
-RealVideo 3.0 X still far from ideal
-RealVideo 4.0 X
-Renderware TXD (TeXture Dictionary) X Texture dictionaries used by the Renderware Engine.
-RL2 video X used in some games by Entertainment Software Partners
-Sierra VMD video X Used in Sierra VMD files.
-Silicon Graphics Motion Video Compressor 1 (MVC1) X
-Silicon Graphics Motion Video Compressor 2 (MVC2) X
-Silicon Graphics RLE 8-bit video X
-Smacker video X Video encoding used in Smacker.
-SMPTE VC-1 X
-Snow X X experimental wavelet codec (fourcc: SNOW)
-Sony PlayStation MDEC (Motion DECoder) X
-Sorenson Vector Quantizer 1 X X fourcc: SVQ1
-Sorenson Vector Quantizer 3 X fourcc: SVQ3
-Sunplus JPEG (SP5X) X fourcc: SP5X
-TechSmith Screen Capture Codec X fourcc: TSCC
-TechSmith Screen Capture Codec 2 X fourcc: TSC2
-Theora E X encoding supported through external library libtheora
-Tiertex Limited SEQ video X Codec used in DOS CD-ROM FlashBack game.
-Ut Video X X
-v210 QuickTime uncompressed 4:2:2 10-bit X X
-v308 QuickTime uncompressed 4:4:4 X X
-v408 QuickTime uncompressed 4:4:4:4 X X
-v410 QuickTime uncompressed 4:4:4 10-bit X X
-VBLE Lossless Codec X
-VMware Screen Codec / VMware Video X Codec used in videos captured by VMware.
-Westwood Studios VQA (Vector Quantized Animation) video X
-Windows Media Image X
-Windows Media Video 7 X X
-Windows Media Video 8 X X
-Windows Media Video 9 X not completely working
-Wing Commander III / Xan X Used in Wing Commander III .MVE files.
-Wing Commander IV / Xan X Used in Wing Commander IV.
-Winnov WNV1 X
-WMV7 X X
-YAMAHA SMAF X X
-Psygnosis YOP Video X
-yuv4 X X libquicktime uncompressed packed 4:2:0
-ZeroCodec Lossless Video X
-ZLIB X X part of LCL, encoder experimental
-Zip Motion Blocks Video X X Encoder works only in PAL8.
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
E
means that support is provided through an external library.
-
-
-
2.4 Audio Codecs# TOC
-
-
-Name Encoding Decoding Comments
-8SVX exponential X
-8SVX fibonacci X
-AAC+ E X encoding supported through external library libaacplus
-AAC E X encoding supported through external library libfaac and libvo-aacenc
-AC-3 IX IX
-ADPCM 4X Movie X
-ADPCM CDROM XA X
-ADPCM Creative Technology X 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
-ADPCM Electronic Arts X Used in various EA titles.
-ADPCM Electronic Arts Maxis CDROM XS X Used in Sim City 3000.
-ADPCM Electronic Arts R1 X
-ADPCM Electronic Arts R2 X
-ADPCM Electronic Arts R3 X
-ADPCM Electronic Arts XAS X
-ADPCM G.722 X X
-ADPCM G.726 X X
-ADPCM IMA AMV X Used in AMV files
-ADPCM IMA Electronic Arts EACS X
-ADPCM IMA Electronic Arts SEAD X
-ADPCM IMA Funcom X
-ADPCM IMA QuickTime X X
-ADPCM IMA Loki SDL MJPEG X
-ADPCM IMA WAV X X
-ADPCM IMA Westwood X
-ADPCM ISS IMA X Used in FunCom games.
-ADPCM IMA Dialogic X
-ADPCM IMA Duck DK3 X Used in some Sega Saturn console games.
-ADPCM IMA Duck DK4 X Used in some Sega Saturn console games.
-ADPCM IMA Radical X
-ADPCM Microsoft X X
-ADPCM MS IMA X X
-ADPCM Nintendo Gamecube AFC X
-ADPCM Nintendo Gamecube DTK X
-ADPCM Nintendo Gamecube THP X
-ADPCM QT IMA X X
-ADPCM SEGA CRI ADX X X Used in Sega Dreamcast games.
-ADPCM Shockwave Flash X X
-ADPCM Sound Blaster Pro 2-bit X
-ADPCM Sound Blaster Pro 2.6-bit X
-ADPCM Sound Blaster Pro 4-bit X
-ADPCM VIMA Used in LucasArts SMUSH animations.
-ADPCM Westwood Studios IMA X Used in Westwood Studios games like Command and Conquer.
-ADPCM Yamaha X X
-AMR-NB E X encoding supported through external library libopencore-amrnb
-AMR-WB E X encoding supported through external library libvo-amrwbenc
-Amazing Studio PAF Audio X
-Apple lossless audio X X QuickTime fourcc ’alac’
-ATRAC1 X
-ATRAC3 X
-ATRAC3+ X
-Bink Audio X Used in Bink and Smacker files in many games.
-CELT E decoding supported through external library libcelt
-Delphine Software International CIN audio X Codec used in Delphine Software International games.
-Discworld II BMV Audio X
-COOK X All versions except 5.1 are supported.
-DCA (DTS Coherent Acoustics) X X
-DPCM id RoQ X X Used in Quake III, Jedi Knight 2 and other computer games.
-DPCM Interplay X Used in various Interplay computer games.
-DPCM Sierra Online X Used in Sierra Online game audio files.
-DPCM Sol X
-DPCM Xan X Used in Origin’s Wing Commander IV AVI files.
-DSD (Direct Stream Digitial), least significant bit first X
-DSD (Direct Stream Digitial), most significant bit first X
-DSD (Direct Stream Digitial), least significant bit first, planar X
-DSD (Direct Stream Digitial), most significant bit first, planar X
-DSP Group TrueSpeech X
-DV audio X
-Enhanced AC-3 X X
-EVRC (Enhanced Variable Rate Codec) X
-FLAC (Free Lossless Audio Codec) X IX
-G.723.1 X X
-G.729 X
-GSM E X encoding supported through external library libgsm
-GSM Microsoft variant E X encoding supported through external library libgsm
-IAC (Indeo Audio Coder) X
-iLBC (Internet Low Bitrate Codec) E E encoding and decoding supported through external library libilbc
-IMC (Intel Music Coder) X
-MACE (Macintosh Audio Compression/Expansion) 3:1 X
-MACE (Macintosh Audio Compression/Expansion) 6:1 X
-MLP (Meridian Lossless Packing) X Used in DVD-Audio discs.
-Monkey’s Audio X
-MP1 (MPEG audio layer 1) IX
-MP2 (MPEG audio layer 2) IX IX encoding supported also through external library TwoLAME
-MP3 (MPEG audio layer 3) E IX encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
-MPEG-4 Audio Lossless Coding (ALS) X
-Musepack SV7 X
-Musepack SV8 X
-Nellymoser Asao X X
-On2 AVC (Audio for Video Codec) X
-Opus E E supported through external library libopus
-PCM A-law X X
-PCM mu-law X X
-PCM signed 8-bit planar X X
-PCM signed 16-bit big-endian planar X X
-PCM signed 16-bit little-endian planar X X
-PCM signed 24-bit little-endian planar X X
-PCM signed 32-bit little-endian planar X X
-PCM 32-bit floating point big-endian X X
-PCM 32-bit floating point little-endian X X
-PCM 64-bit floating point big-endian X X
-PCM 64-bit floating point little-endian X X
-PCM D-Cinema audio signed 24-bit X X
-PCM signed 8-bit X X
-PCM signed 16-bit big-endian X X
-PCM signed 16-bit little-endian X X
-PCM signed 24-bit big-endian X X
-PCM signed 24-bit little-endian X X
-PCM signed 32-bit big-endian X X
-PCM signed 32-bit little-endian X X
-PCM signed 16/20/24-bit big-endian in MPEG-TS X
-PCM unsigned 8-bit X X
-PCM unsigned 16-bit big-endian X X
-PCM unsigned 16-bit little-endian X X
-PCM unsigned 24-bit big-endian X X
-PCM unsigned 24-bit little-endian X X
-PCM unsigned 32-bit big-endian X X
-PCM unsigned 32-bit little-endian X X
-PCM Zork X
-QCELP / PureVoice X
-QDesign Music Codec 2 X There are still some distortions.
-RealAudio 1.0 (14.4K) X X Real 14400 bit/s codec
-RealAudio 2.0 (28.8K) X Real 28800 bit/s codec
-RealAudio 3.0 (dnet) IX X Real low bitrate AC-3 codec
-RealAudio Lossless X
-RealAudio SIPR / ACELP.NET X
-Shorten X
-Sierra VMD audio X Used in Sierra VMD files.
-Smacker audio X
-SMPTE 302M AES3 audio X X
-Sonic X X experimental codec
-Sonic lossless X X experimental codec
-Speex E E supported through external library libspeex
-TAK (Tom’s lossless Audio Kompressor) X
-True Audio (TTA) X X
-TrueHD X Used in HD-DVD and Blu-Ray discs.
-TwinVQ (VQF flavor) X
-VIMA X Used in LucasArts SMUSH animations.
-Vorbis E X A native but very primitive encoder exists.
-Voxware MetaSound X
-WavPack X X
-Westwood Audio (SND1) X
-Windows Media Audio 1 X X
-Windows Media Audio 2 X X
-Windows Media Audio Lossless X
-Windows Media Audio Pro X
-Windows Media Audio Voice X
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
E
means that support is provided through an external library.
-
-
I
means that an integer-only version is available, too (ensures high
-performance on systems without hardware floating point support).
-
-
-
2.5 Subtitle Formats# TOC
-
-
-Name Muxing Demuxing Encoding Decoding
-3GPP Timed Text X X
-AQTitle X X
-DVB X X X X
-DVB teletext X E
-DVD X X X X
-JACOsub X X X
-MicroDVD X X X
-MPL2 X X
-MPsub (MPlayer) X X
-PGS X
-PJS (Phoenix) X X
-RealText X X
-SAMI X X
-Spruce format (STL) X X
-SSA/ASS X X X X
-SubRip (SRT) X X X X
-SubViewer v1 X X
-SubViewer X X
-TED Talks captions X X
-VobSub (IDX+SUB) X X
-VPlayer X X
-WebVTT X X X X
-XSUB X X
-
-
-
X
means that the feature is supported.
-
-
E
means that support is provided through an external library.
-
-
-
2.6 Network Protocols# TOC
-
-
-Name Support
-file X
-FTP X
-Gopher X
-HLS X
-HTTP X
-HTTPS X
-Icecast X
-MMSH X
-MMST X
-pipe X
-RTMP X
-RTMPE X
-RTMPS X
-RTMPT X
-RTMPTE X
-RTMPTS X
-RTP X
-SAMBA E
-SCTP X
-SFTP E
-TCP X
-TLS X
-UDP X
-
-
-
X
means that the protocol is supported.
-
-
E
means that support is provided through an external library.
-
-
-
-
2.7 Input/Output Devices# TOC
-
-
-Name Input Output
-ALSA X X
-BKTR X
-caca X
-DV1394 X
-Lavfi virtual device X
-Linux framebuffer X X
-JACK X
-LIBCDIO X
-LIBDC1394 X
-OpenAL X
-OpenGL X
-OSS X X
-PulseAudio X X
-SDL X
-Video4Linux2 X X
-VfW capture X
-X11 grabbing X
-Win32 grabbing X
-
-
-
X
means that input/output is supported.
-
-
-
2.8 Timecode# TOC
-
-
-Codec/format Read Write
-AVI X X
-DV X X
-GXF X X
-MOV X X
-MPEG1/2 X X
-MXF X X
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/git-howto.html b/Externals/ffmpeg/dev/doc/git-howto.html
deleted file mode 100644
index dbbc681f4e..0000000000
--- a/Externals/ffmpeg/dev/doc/git-howto.html
+++ /dev/null
@@ -1,493 +0,0 @@
-
-
-
-
-
-
- Using git to develop FFmpeg
-
-
-
-
-
-
-
-
- Using git to develop FFmpeg
-
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Introduction# TOC
-
-
-This document aims at giving some quick references on a set of useful git
-commands. You should always use the extensive and detailed documentation
-provided directly by git:
-
-
-
-
shows you the available subcommands,
-
-
-
git <command> --help
-man git-<command>
-
-
-
shows information about the subcommand <command>.
-
-
Additional information could be found on the
-Git Reference website
-
-
For more information about the Git project, visit the
-
-
Git website
-
-
Consult these resources whenever you have problems, they are quite exhaustive.
-
-
What follows now is a basic introduction to Git and some FFmpeg-specific
-guidelines to ease the contribution to the project
-
-
-
2 Basics Usage# TOC
-
-
-
2.1 Get GIT# TOC
-
-
You can get git from http://git-scm.com/
-Most distributions and operating systems provide a package for it.
-
-
-
-
2.2 Cloning the source tree# TOC
-
-
-
git clone git://source.ffmpeg.org/ffmpeg <target>
-
-
-
This will put the FFmpeg sources into the directory <target> .
-
-
-
git clone git@source.ffmpeg.org:ffmpeg <target>
-
-
-
This will put the FFmpeg sources into the directory <target> and let
-you push back your changes to the remote repository.
-
-
Make sure that you do not have Windows line endings in your checkouts,
-otherwise you may experience spurious compilation failures. One way to
-achieve this is to run
-
-
-
git config --global core.autocrlf false
-
-
-
-
-
2.3 Updating the source tree to the latest revision# TOC
-
-
git pull
-
pulls in the latest changes from the tracked branch. The tracked branch
-can be remote. By default the master branch tracks the branch master in
-the remote origin.
-
-
-
--rebase
(see below) is recommended.
-
-
-
2.4 Rebasing your local branches# TOC
-
-
git pull --rebase
-
fetches the changes from the main repository and replays your local commits
-over it. This is required to keep all your local changes at the top of
-FFmpeg’s master tree. The master tree will reject pushes with merge commits.
-
-
-
-
2.5 Adding/removing files/directories# TOC
-
-
-
git add [-A] <filename/dirname>
-git rm [-r] <filename/dirname>
-
-
-
GIT needs to get notified of all changes you make to your working
-directory that make files appear or disappear.
-Line moves across files are automatically tracked.
-
-
-
-
2.6 Showing modifications# TOC
-
-
-
git diff <filename(s)>
-
-
-
will show all local modifications in your working directory as unified diff.
-
-
-
-
2.7 Inspecting the changelog# TOC
-
-
-
-
You may also use the graphical tools like gitview or gitk or the web
-interface available at http://source.ffmpeg.org/
-
-
-
2.8 Checking source tree status# TOC
-
-
git status
-
detects all the changes you made and lists what actions will be taken in case
-of a commit (additions, modifications, deletions, etc.).
-
-
-
-
2.9 Committing# TOC
-
-
-
-
to double check your changes before committing them to avoid trouble later
-on. All experienced developers do this on each and every commit, no matter
-how small.
-Every one of them has been saved from looking like a fool by this many times.
-It’s very easy for stray debug output or cosmetic modifications to slip in,
-please avoid problems through this extra level of scrutiny.
-
-
For cosmetics-only commits you should get (almost) empty output from
-
-
-
git diff -w -b <filename(s)>
-
-
-
Also check the output of
-
-
-
-
to make sure you don’t have untracked files or deletions.
-
-
-
git add [-i|-p|-A] <filenames/dirnames>
-
-
-
Make sure you have told git your name and email address
-
-
-
git config --global user.name "My Name"
-git config --global user.email my@email.invalid
-
-
-
Use –global to set the global configuration for all your git checkouts.
-
-
Git will select the changes to the files for commit. Optionally you can use
-the interactive or the patch mode to select hunk by hunk what should be
-added to the commit.
-
-
-
-
-
Git will commit the selected changes to your current local branch.
-
-
You will be prompted for a log message in an editor, which is either
-set in your personal configuration file through
-
-
-
git config --global core.editor
-
-
-
or set by one of the following environment variables:
-GIT_EDITOR , VISUAL or EDITOR .
-
-
Log messages should be concise but descriptive. Explain why you made a change,
-what you did will be obvious from the changes themselves most of the time.
-Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
-levels look at and educate themselves while reading through your code. Don’t
-include filenames in log messages, Git provides that information.
-
-
Possibly make the commit message have a terse, descriptive first line, an
-empty line and then a full description. The first line will be used to name
-the patch by git format-patch.
-
-
-
2.10 Preparing a patchset# TOC
-
-
-
git format-patch <commit> [-o directory]
-
-
-
will generate a set of patches for each commit between <commit> and
-current HEAD . E.g.
-
-
-
git format-patch origin/master
-
-
-
will generate patches for all commits on current branch which are not
-present in upstream.
-A useful shortcut is also
-
-
git format-patch -n
-
which will generate patches from last n commits.
-By default the patches are created in the current directory.
-
-
-
2.11 Sending patches for review# TOC
-
-
-
git send-email <commit list|directory>
-
-
-
will send the patches created by git format-patch
or directly
-generates them. All the email fields can be configured in the global/local
-configuration or overridden by command line.
-Note that this tool must often be installed separately (e.g. git-email
-package on Debian-based distros).
-
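-For example, assuming the patches were written to an outgoing/ directory
-by git format-patch (the directory name is only an example), the whole
-set can be sent with:
-
git send-email --to=ffmpeg-devel@ffmpeg.org outgoing/*.patch
-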
-
-
-
2.12 Renaming/moving/copying files or contents of files# TOC
-
-
Git automatically tracks such changes, making those normal commits.
-
-
-
mv/cp path/file otherpath/otherfile
-git add [-A] .
-git commit
-
-
-
-
-
3 Git configuration# TOC
-
-
In order to simplify a few workflows, it is advisable to configure both
-your personal Git installation and your local FFmpeg repository.
-
-
-
3.1 Personal Git installation# TOC
-
-
Add the following to your ~/.gitconfig to help git send-email
-and git format-patch
detect renames:
-
-
-
[diff]
- renames = copy
-
-
-
-
3.2 Repository configuration# TOC
-
-
In order to have git send-email
automatically send patches
-to the ffmpeg-devel mailing list, add the following stanza
-to /path/to/ffmpeg/repository/.git/config :
-
-
-
[sendemail]
- to = ffmpeg-devel@ffmpeg.org
-
-
-
-
4 FFmpeg specific# TOC
-
-
-
4.1 Reverting broken commits# TOC
-
-
-
-
git reset
will uncommit the changes till <commit> rewriting
-the current branch history.
-
-
-
-
allows one to amend the last commit details quickly.
-
-
-
git rebase -i origin/master
-
-
-
will replay local commits over the main repository allowing to edit, merge
-or remove some of them in the process.
-
-
-
git reset
, git commit --amend
and git rebase
-rewrite history, so you should use them ONLY on your local or topic branches.
-The main repository will reject those changes.
-
-
-
-
git revert
will generate a revert commit. This will not make the
-faulty commit disappear from the history.
-
-
-
4.2 Pushing changes to remote trees# TOC
-
-
-
-
Will push the changes to the default remote (origin ).
-Git will prevent you from pushing changes if the local and remote trees are
-out of sync. Refer to the sections above on updating and rebasing your
-local branches to sync the local tree.
-
-
-
git remote add <name> <url>
-
-
-
Will add additional remote with a name reference, it is useful if you want
-to push your local branch for review on a remote host.
-
-
-
git push <remote> <refspec>
-
-
-
Will push the changes to the <remote> repository.
-Omitting <refspec> makes git push
update all the remote
-branches matching the local ones.
-
-
-
4.3 Finding a specific svn revision# TOC
-
-
Since version 1.7.1 git supports :/foo syntax for specifying commits
-based on a regular expression. see man gitrevisions
-
-
-
git show :/'as revision 23456'
-
-
-
will show the svn changeset r23456 . With older git versions searching in
-the git log
output is the easiest option (especially if a pager with
-search capabilities is used).
-This commit can be checked out with
-
-
-
git checkout -b svn_23456 :/'as revision 23456'
-
-
-
or for git < 1.7.1 with
-
-
-
git checkout -b svn_23456 $SHA1
-
-
-
where $SHA1 is the commit hash from the git log
output.
-
-
-
-
5 pre-push checklist# TOC
-
-
Once you have a set of commits that you feel are ready for pushing,
-work through the following checklist to doublecheck everything is in
-proper order. This list tries to be exhaustive. In case you are just
-pushing a typo in a comment, some of the steps may be unnecessary.
-Apply your common sense, but if in doubt, err on the side of caution.
-
-
First, make sure that the commits and branches you are going to push
-match what you want pushed and that nothing is missing, extraneous or
-wrong. You can see what will be pushed by running the git push command
-with –dry-run first. And then inspecting the commits listed with
-git log -p 1234567..987654
. The git status
command
-may help in finding local changes that have been forgotten to be added.
-
-
Next let the code pass through a full run of our testsuite.
-
-
- make distclean
- /path/to/ffmpeg/configure
- make check
- if fate fails due to missing samples run make fate-rsync
and retry
-
-
-
-Make sure all your changes have been checked before pushing them; the
-testsuite only checks against regressions, and that only to some extent. It
-obviously does not check newly added features/code to be working unless you have
-added a test for that (which is recommended).
-
-
Also note that every single commit should pass the test suite, not just
-the result of a series of patches.
-
-
Once everything passed, push the changes to your public ffmpeg clone and post a
-merge request to ffmpeg-devel. You can also push them directly but this is not
-recommended.
-
-
-
6 Server Issues# TOC
-
-
Contact the project admins root@ffmpeg.org if you have technical
-problems with the GIT server.
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libavcodec.html b/Externals/ffmpeg/dev/doc/libavcodec.html
deleted file mode 100644
index b15f00ec35..0000000000
--- a/Externals/ffmpeg/dev/doc/libavcodec.html
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-
-
-
-
- Libavcodec Documentation
-
-
-
-
-
-
-
-
- Libavcodec Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavcodec library provides a generic encoding/decoding framework
-and contains multiple decoders and encoders for audio, video and
-subtitle streams, and several bitstream filters.
-
-
The shared architecture provides various services ranging from bit
-stream I/O to DSP optimizations, and makes it suitable for
-implementing robust and fast codecs as well as for experimentation.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-codecs , bitstream-filters ,
-libavutil
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libavdevice.html b/Externals/ffmpeg/dev/doc/libavdevice.html
deleted file mode 100644
index dd0379b147..0000000000
--- a/Externals/ffmpeg/dev/doc/libavdevice.html
+++ /dev/null
@@ -1,73 +0,0 @@
-
-
-
-
-
-
- Libavdevice Documentation
-
-
-
-
-
-
-
-
- Libavdevice Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavdevice library provides a generic framework for grabbing from
-and rendering to many common multimedia input/output devices, and
-supports several input and output devices, including Video4Linux2,
-VfW, DShow, and ALSA.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-devices ,
-libavutil , libavcodec , libavformat
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libavfilter.html b/Externals/ffmpeg/dev/doc/libavfilter.html
deleted file mode 100644
index 0bfd0e598a..0000000000
--- a/Externals/ffmpeg/dev/doc/libavfilter.html
+++ /dev/null
@@ -1,72 +0,0 @@
-
-
-
-
-
-
- Libavfilter Documentation
-
-
-
-
-
-
-
-
- Libavfilter Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavfilter library provides a generic audio/video filtering
-framework containing several filters, sources and sinks.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-filters ,
-libavutil , libswscale , libswresample ,
-libavcodec , libavformat , libavdevice
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libavformat.html b/Externals/ffmpeg/dev/doc/libavformat.html
deleted file mode 100644
index 57e29c5057..0000000000
--- a/Externals/ffmpeg/dev/doc/libavformat.html
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-
-
-
-
- Libavformat Documentation
-
-
-
-
-
-
-
-
- Libavformat Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavformat library provides a generic framework for multiplexing
-and demultiplexing (muxing and demuxing) audio, video and subtitle
-streams. It encompasses multiple muxers and demuxers for multimedia
-container formats.
-
-
It also supports several input and output protocols to access a media
-resource.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-formats , ffmpeg-protocols ,
-libavutil , libavcodec
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libavutil.html b/Externals/ffmpeg/dev/doc/libavutil.html
deleted file mode 100644
index 23e471d17a..0000000000
--- a/Externals/ffmpeg/dev/doc/libavutil.html
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
-
-
-
-
- Libavutil Documentation
-
-
-
-
-
-
-
-
- Libavutil Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavutil library is a utility library to aid portable
-multimedia programming. It contains safe portable string functions,
-random number generators, data structures, additional mathematics
-functions, cryptography and multimedia related functionality (like
-enumerations for pixel and sample formats). It is not a library for
-code needed by both libavcodec and libavformat.
-
-
-The goals for this library are to be:
-
-
-Modular
-It should have few interdependencies and the possibility of disabling individual
-parts during ./configure
.
-
-
-Small
-Both sources and objects should be small.
-
-
-Efficient
-It should have low CPU and memory usage.
-
-
-Useful
-It should avoid useless features that almost no one needs.
-
-
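A small sketch (not part of the deleted page) of the kind of helpers the library provides, here timestamp rescaling between time bases and pixel format name lookup:

#include <libavutil/rational.h>
#include <libavutil/mathematics.h>
#include <libavutil/pixdesc.h>
#include <stdio.h>

int main(void)
{
    AVRational mpeg_tb = {1, 90000};   /* 90 kHz MPEG time base */
    AVRational ms_tb   = {1, 1000};    /* millisecond time base */
    int64_t pts = 270000;

    /* rescale a timestamp between time bases without intermediate overflow */
    printf("%lld ms\n", (long long)av_rescale_q(pts, mpeg_tb, ms_tb));

    /* pixel format enumerations come with helper functions */
    printf("%s\n", av_get_pix_fmt_name(AV_PIX_FMT_YUV420P));
    return 0;
}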
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-utils
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libswresample.html b/Externals/ffmpeg/dev/doc/libswresample.html
deleted file mode 100644
index 6df93990ef..0000000000
--- a/Externals/ffmpeg/dev/doc/libswresample.html
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
-
-
-
-
- Libswresample Documentation
-
-
-
-
-
-
-
-
- Libswresample Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libswresample library performs highly optimized audio resampling,
-rematrixing and sample format conversion operations.
-
-
Specifically, this library performs the following conversions:
-
-
- Resampling : is the process of changing the audio rate, for
-example from a high sample rate of 44100Hz to 8000Hz. Audio
-conversion from high to low sample rate is a lossy process. Several
-resampling options and algorithms are available.
-
- Format conversion : is the process of converting the type of
-samples, for example from 16-bit signed samples to unsigned 8-bit or
-float samples. It also handles packing conversion, when passing from
-packed layout (all samples belonging to distinct channels interleaved
-in the same buffer), to planar layout (all samples belonging to the
-same channel stored in a dedicated buffer or "plane").
-
- Rematrixing : is the process of changing the channel layout, for
-example from stereo to mono. When the input channels cannot be mapped
-to the output streams, the process is lossy, since it involves
-different gain factors and mixing.
-
-
-
Various other audio conversions (e.g. stretching and padding) are
-enabled through dedicated options.
-
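A hedged sketch (not part of the deleted page) that exercises all three conversions at once, downsampling silent 44100 Hz stereo S16 input to 8000 Hz mono float; the buffer sizes are arbitrary example values:

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <stdint.h>

int main(void)
{
    /* configure a converter: 44100 Hz stereo S16 -> 8000 Hz mono float */
    SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_MONO,   AV_SAMPLE_FMT_FLT, 8000,    /* output */
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,   /* input  */
            0, NULL);
    if (!swr || swr_init(swr) < 0)
        return 1;

    int16_t in_buf[1024 * 2] = {0};              /* 1024 silent interleaved stereo frames */
    float   out_buf[1024];                       /* more than enough for the 8 kHz output */
    const uint8_t *in[1]  = { (const uint8_t *)in_buf };
    uint8_t       *out[1] = { (uint8_t *)out_buf };

    int got = swr_convert(swr, out, 1024, in, 1024);  /* returns samples per channel */
    swr_free(&swr);
    return got < 0;
}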
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-resampler ,
-libavutil
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/libswscale.html b/Externals/ffmpeg/dev/doc/libswscale.html
deleted file mode 100644
index 425df90758..0000000000
--- a/Externals/ffmpeg/dev/doc/libswscale.html
+++ /dev/null
@@ -1,89 +0,0 @@
-
-
-
-
-
-
- Libswscale Documentation
-
-
-
-
-
-
-
-
- Libswscale Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libswscale library performs highly optimized image scaling and
-colorspace and pixel format conversion operations.
-
-
Specifically, this library performs the following conversions:
-
-
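As a rough illustration (not part of the deleted page), a single SwsContext can rescale a frame and convert its pixel format in one call; the sizes, formats and the bilinear flag below are arbitrary example choices:

#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>

int main(void)
{
    uint8_t *src[4], *dst[4];
    int src_ls[4], dst_ls[4];

    /* allocate a 640x480 YUV420P source and a 320x240 RGB24 destination */
    if (av_image_alloc(src, src_ls, 640, 480, AV_PIX_FMT_YUV420P, 16) < 0 ||
        av_image_alloc(dst, dst_ls, 320, 240, AV_PIX_FMT_RGB24, 16) < 0)
        return 1;

    /* one context handles both the rescale and the pixel format conversion */
    struct SwsContext *sws = sws_getContext(640, 480, AV_PIX_FMT_YUV420P,
                                            320, 240, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return 1;

    sws_scale(sws, (const uint8_t * const *)src, src_ls, 0, 480, dst, dst_ls);

    sws_freeContext(sws);
    av_freep(&src[0]);
    av_freep(&dst[0]);
    return 0;
}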
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-scaler ,
-libavutil
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/nut.html b/Externals/ffmpeg/dev/doc/nut.html
deleted file mode 100644
index 7b16df6a4d..0000000000
--- a/Externals/ffmpeg/dev/doc/nut.html
+++ /dev/null
@@ -1,211 +0,0 @@
-
-
-
-
-
-
- NUT
-
-
-
-
-
-
-
-
- NUT
-
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
NUT is a low overhead generic container format. It stores audio, video,
-subtitle and user-defined streams in a simple, yet efficient, way.
-
-
It was created by a group of FFmpeg and MPlayer developers in 2003
-and was finalized in 2008.
-
-
The official NUT specification is at svn://svn.mplayerhq.hu/nut .
-In case of any differences between this text and the official specification,
-the official specification shall prevail.
-
-
-
-
2 Modes# TOC

NUT has some variants signaled by using the flags field in its main header.
-
-
-BROADCAST Extend the syncpoint to report the sender wallclock
-PIPE Omit completely the syncpoint
-
-
-
-
2.1 BROADCAST# TOC
-
-
The BROADCAST variant provides a secondary time reference to facilitate
-detecting endpoint latency and network delays.
-It assumes all the endpoint clocks are synchronized.
-It is intended for real-time scenarios.
-
-
-
2.2 PIPE# TOC
-
-
The PIPE variant assumes NUT is used as a non-seekable intermediate container;
-omitting syncpoints removes unneeded overhead and reduces the overall
-memory usage.
-
-
-
3 Container-specific codec tags# TOC
-
-
-
3.1 Generic raw YUVA formats# TOC
-
-
Since many exotic planar YUVA pixel formats are not considered by
-the AVI/QuickTime FourCC lists, the following scheme is adopted for
-representing them.
-
-
The first two bytes can contain the values:
-Y1 = only Y
-Y2 = Y+A
-Y3 = YUV
-Y4 = YUVA
-
-
The third byte represents the width and height chroma subsampling
-values for the UV planes, that is the amount to shift the luma
-width/height right to find the chroma width/height.
-
-
The fourth byte is the number of bits used (8, 16, ...).
-
-
If the order of bytes is inverted, that means that each component has
-to be read big-endian.
-
-
-
3.2 Raw Audio# TOC
-
-
-ALAW A-LAW
-ULAW MU-LAW
-P<type><interleaving><bits> little-endian PCM
-<bits><interleaving><type>P big-endian PCM
-
-
-
<type> is S for signed integer, U for unsigned integer, F for IEEE float
-<interleaving> is D for default, P for planar.
-<bits> is 8/16/24/32
-
-
-
PFD[32] would for example be signed 32 bit little-endian IEEE float
-
-
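For illustration only (this is not part of the specification text), such a tag could be composed with the MKTAG helper from libavutil; note that the fourth byte 32 is the raw bit-depth value, not the ASCII characters "32":

#include <libavutil/common.h>
#include <stdio.h>

int main(void)
{
    /* 'P','F','D' plus the raw byte 32: 32-bit little-endian float PCM */
    unsigned tag = MKTAG('P', 'F', 'D', 32);
    printf("0x%08x\n", tag);
    return 0;
}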
-
-
3.3 Subtitles# TOC
-
-
-UTF8 Raw UTF-8
-SSA[0] SubStation Alpha
-DVDS DVD subtitles
-DVBS DVB subtitles
-
-
-
-
3.4 Raw Data# TOC
-
-
-
-
-
3.5 Codecs# TOC
-
-
-3IV1 non-compliant MPEG-4 generated by old 3ivx
-ASV1 Asus Video
-ASV2 Asus Video 2
-CVID Cinepak
-CYUV Creative YUV
-DIVX non-compliant MPEG-4 generated by old DivX
-DUCK Truemotion 1
-FFV1 FFmpeg video 1
-FFVH FFmpeg Huffyuv
-H261 ITU H.261
-H262 ITU H.262
-H263 ITU H.263
-H264 ITU H.264
-HFYU Huffyuv
-I263 Intel H.263
-IV31 Indeo 3.1
-IV32 Indeo 3.2
-IV50 Indeo 5.0
-LJPG ITU JPEG (lossless)
-MJLS ITU JPEG-LS
-MJPG ITU JPEG
-MPG4 MS MPEG-4v1 (not ISO MPEG-4)
-MP42 MS MPEG-4v2
-MP43 MS MPEG-4v3
-MP4V ISO MPEG-4 Part 2 Video (from old encoders)
-mpg1 ISO MPEG-1 Video
-mpg2 ISO MPEG-2 Video
-MRLE MS RLE
-MSVC MS Video 1
-RT21 Indeo 2.1
-RV10 RealVideo 1.0
-RV20 RealVideo 2.0
-RV30 RealVideo 3.0
-RV40 RealVideo 4.0
-SNOW FFmpeg Snow
-SVQ1 Sorenson Video 1
-SVQ3 Sorenson Video 3
-theo Xiph Theora
-TM20 Truemotion 2.0
-UMP4 non-compliant MPEG-4 generated by UB Video MPEG-4
-VCR1 ATI VCR1
-VP30 VP 3.0
-VP31 VP 3.1
-VP50 VP 5.0
-VP60 VP 6.0
-VP61 VP 6.1
-VP62 VP 6.2
-VP70 VP 7.0
-WMV1 MS WMV7
-WMV2 MS WMV8
-WMV3 MS WMV9
-WV1F non-compliant MPEG-4 generated by ?
-WVC1 VC-1
-XVID non-compliant MPEG-4 generated by old Xvid
-XVIX non-compliant MPEG-4 generated by old Xvid with interlacing bug
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/doc/platform.html b/Externals/ffmpeg/dev/doc/platform.html
deleted file mode 100644
index fb57926de2..0000000000
--- a/Externals/ffmpeg/dev/doc/platform.html
+++ /dev/null
@@ -1,447 +0,0 @@
-
-
-
-
-
-
- Platform Specific Information
-
-
-
-
-
-
-
-
- Platform Specific Information
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Unix-like# TOC
-
-
Some parts of FFmpeg cannot be built with version 2.15 of the GNU
-assembler which is still provided by a few AMD64 distributions. To
-make sure your compiler really uses the required version of gas
-after a binutils upgrade, run:
-
-
-
$(gcc -print-prog-name=as) --version
-
-
-
If not, then you should install a different compiler that has no
-hard-coded path to gas. In the worst case pass --disable-asm
-to configure.
-
-
-
1.1 Advanced linking configuration# TOC
-
-
If you compiled FFmpeg libraries statically and you want to use them to
-build your own shared library, you may need to force PIC support (with
---enable-pic
during FFmpeg configure) and add the following option
-to your project LDFLAGS:
-
-Wl,-Bsymbolic
-
-
If your target platform requires position independent binaries, you should
-pass the correct linking flag (e.g. -pie
) to --extra-ldexeflags
.
-
-
-
-
-
1.2 BSD# TOC

BSD make will not build FFmpeg; you need to install and use GNU Make (gmake).
-
-
-
1.3 (Open)Solaris# TOC
-
-
GNU Make is required to build FFmpeg, so you have to invoke gmake;
-standard Solaris Make will not work. When building with a non-c99 front-end
-(gcc, generic suncc) add either --extra-libs=/usr/lib/values-xpg6.o
-or --extra-libs=/usr/lib/64/values-xpg6.o
to the configure options
-since the libc is not c99-compliant by default. The probes performed by
-configure may raise an exception leading to the death of configure itself
-due to a bug in the system shell. Simply invoke a different shell such as
-bash directly to work around this:
-
bash ./configure
-
-
-
1.4 Darwin (Mac OS X, iPhone)# TOC
-
-
The toolchain provided with Xcode is sufficient to build the basic
-unaccelerated code.
-
-
Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
-https://github.com/FFmpeg/gas-preprocessor or
-https://github.com/yuvi/gas-preprocessor (currently outdated) to build the optimized
-assembly functions. Put the Perl script somewhere
-in your PATH, FFmpeg’s configure will pick it up automatically.
-
-
Mac OS X on amd64 and x86 requires yasm
to build most of the
-optimized assembly functions. Fink ,
-Gentoo Prefix ,
-Homebrew
-or MacPorts can easily provide it.
-
-
-
-
-
-
Using a cross-compiler is preferred for various reasons.
-http://www.delorie.com/howto/djgpp/linux-x-djgpp.html
-
-
-
-
-
-
For information about compiling FFmpeg on OS/2 see
-http://www.edm2.com/index.php/FFmpeg .
-
-
-
-
4 Windows# TOC
-
-
To get help and instructions for building FFmpeg under Windows, check out
-the FFmpeg Windows Help Forum at http://ffmpeg.zeranoe.com/forum/ .
-
-
-
4.1 Native Windows compilation using MinGW or MinGW-w64# TOC
-
-
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
-toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
-http://www.mingw.org/ or http://mingw-w64.sourceforge.net/ .
-You can find detailed installation instructions in the download section and
-the FAQ.
-
-
Notes:
-
-
- Building natively using MSYS can be sped up by disabling implicit rules
-in the Makefile by calling make -r
instead of plain make
. This
-speed up is close to non-existent for normal one-off builds and is only
-noticeable when running make for a second time (for example during
-make install
).
-
- In order to compile FFplay, you must have the MinGW development library
-of SDL and pkg-config
installed.
-
- By using ./configure --enable-shared
when configuring FFmpeg,
-you can build the FFmpeg libraries (e.g. libavutil, libavcodec,
-libavformat) as DLLs.
-
-
-
-
-
4.2 Microsoft Visual C++ or Intel C++ Compiler for Windows# TOC
-
-
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
-and wrapper, or with MSVC 2013 and ICL natively.
-
-
You will need the following prerequisites:
-
-
-
-
To set up a proper environment in MSYS, you need to run msys.bat
from
-the Visual Studio or Intel Compiler command prompt.
-
-
Place yasm.exe
somewhere in your PATH
. If using MSVC 2012 or
-earlier, place c99wrap.exe
and c99conv.exe
somewhere in your
-PATH
as well.
-
-
Next, make sure any other headers and libs you want to use, such as zlib, are
-located in a spot that the compiler can see. Do so by modifying the LIB
-and INCLUDE
environment variables to include the Windows-style
-paths to these directories. Alternatively, you can try and use the
---extra-cflags
/--extra-ldflags
configure options. If using MSVC
-2012 or earlier, place inttypes.h
somewhere the compiler can see too.
-
-
Finally, run:
-
-
-
For MSVC:
-./configure --toolchain=msvc
-
-For ICL:
-./configure --toolchain=icl
-
-make
-make install
-
-
-
If you wish to compile shared libraries, add --enable-shared
to your
-configure options. Note that due to the way MSVC and ICL handle DLL imports and
-exports, you cannot compile static and shared libraries at the same time, and
-enabling shared libraries will automatically disable the static ones.
-
-
Notes:
-
-
-
-
-
4.2.1 Linking to FFmpeg with Microsoft Visual C++# TOC
-
-
If you plan to link with MSVC-built static libraries, you will need
-to make sure you have Runtime Library
set to
-Multi-threaded (/MT)
in your project’s settings.
-
-
You will need to define inline
to something MSVC understands:
-
-
#define inline __inline
-
-
-
Also note, that as stated in Microsoft Visual C++ , you will need
-an MSVC-compatible inttypes.h .
-
-
If you plan on using import libraries created by dlltool, you must
-set References
to No (/OPT:NOREF)
under the linker optimization
-settings, otherwise the resulting binaries will fail during runtime.
-This is not required when using import libraries generated by lib.exe
.
-This issue is reported upstream at
-http://sourceware.org/bugzilla/show_bug.cgi?id=12633 .
-
-
To create import libraries that work with the /OPT:REF
option
-(which is enabled by default in Release mode), follow these steps:
-
-
- Open the Visual Studio Command Prompt .
-
-Alternatively, in a normal command line prompt, call vcvars32.bat
-which sets up the environment variables for the Visual C++ tools
-(the standard location for this file is something like
-C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat ).
-
- Enter the bin directory where the created LIB and DLL files
-are stored.
-
- Generate new import libraries with lib.exe
:
-
-
-
lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib
-
-
-Replace foo-version
and foo
with the respective library names.
-
-
-
-
-
4.3 Cross compilation for Windows with Linux# TOC
-
-
You must use the MinGW cross compilation tools available at
-http://www.mingw.org/ .
-
-
Then configure FFmpeg with the following options:
-
-
./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc-
-
-
(you can change the cross-prefix according to the prefix chosen for the
-MinGW tools).
-
-
Then you can easily test FFmpeg with Wine .
-
-
-
4.4 Compilation under Cygwin# TOC
-
-
Please use Cygwin 1.7.x as the obsolete 1.5.x Cygwin versions lack
-llrint() in their C library.
-
-
Install your Cygwin with all the "Base" packages, plus the
-following "Devel" ones:
-
-
binutils, gcc4-core, make, git, mingw-runtime, texinfo
-
-
-
In order to run FATE you will also need the following "Utils" packages:
-
-
-
If you want to build FFmpeg with additional libraries, download Cygwin
-"Devel" packages for Ogg and Vorbis from any Cygwin packages repository:
-
-
libogg-devel, libvorbis-devel
-
-
-
These library packages are only available from
-Cygwin Ports :
-
-
-
yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
-libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
-
-
-
The recommendation for x264 is to build it from source, as it evolves too
-quickly for Cygwin Ports to be up to date.
-
-
-
4.5 Crosscompilation for Windows under Cygwin# TOC
-
-
With Cygwin you can create Windows binaries that do not need the cygwin1.dll.
-
-
Just install your Cygwin as explained before, plus these additional
-"Devel" packages:
-
-
gcc-mingw-core, mingw-runtime, mingw-zlib
-
-
-
and add some special flags to your configure invocation.
-
-
For a static build run
-
-
./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
-
-
-
and for a build with shared libraries
-
-
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
-
-
-
-
5 Plan 9# TOC
-
-
The native Plan 9 compiler
-does not implement all the C99 features needed by FFmpeg so the gcc
-port must be used. Furthermore, a few items missing from the C
-library and shell environment need to be fixed.
-
-
- GNU awk, grep, make, and sed
-
-Working packages of these tools can be found at
-ports2plan9 .
-They can be installed with 9front’s pkg
-utility by setting pkgpath
to
-http://ports2plan9.googlecode.com/files/
.
-
- Missing/broken head
and printf
commands
-
-Replacements adequate for building FFmpeg can be found in the
-compat/plan9
directory. Place these somewhere they will be
-found by the shell. These are not full implementations of the
-commands and are not suitable for general use.
-
- Missing C99 stdint.h
and inttypes.h
-
-Replacement headers are available from
-http://code.google.com/p/plan9front/issues/detail?id=152 .
-
- Missing or non-standard library functions
-
-Some functions in the C library are missing or incomplete. The
-gcc-apelibs-1207
package from
-ports2plan9
-includes an updated C library, but installing the full package gives
-unusable executables. Instead, keep the files from gccbin.tgz
-under /386/lib/gnu
. From the libc.a
archive in the
-gcc-apelibs-1207
package, extract the following object files and
-turn them into a library:
-
-
- strerror.o
- strtoll.o
- snprintf.o
- vsnprintf.o
- vfprintf.o
- _IO_getc.o
- _IO_putc.o
-
-
-Use the --extra-libs
option of configure
to inform the
-build system of this library.
-
- FPU exceptions enabled by default
-
-Unlike most other systems, Plan 9 enables FPU exceptions by default.
-These must be disabled before calling any FFmpeg functions. While the
-included tools will do this automatically, other users of the
-libraries must do it themselves.
-
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/dev/include/libavdevice/avdevice.h b/Externals/ffmpeg/dev/include/libavdevice/avdevice.h
deleted file mode 100644
index 2d675b012d..0000000000
--- a/Externals/ffmpeg/dev/include/libavdevice/avdevice.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVDEVICE_AVDEVICE_H
-#define AVDEVICE_AVDEVICE_H
-
-#include "version.h"
-
-/**
- * @file
- * @ingroup lavd
- * Main libavdevice API header
- */
-
-/**
- * @defgroup lavd Special devices muxing/demuxing library
- * @{
- * Libavdevice is a complementary library to @ref libavf "libavformat". It
- * provides various "special" platform-specific muxers and demuxers, e.g. for
- * grabbing devices, audio capture and playback etc. As a consequence, the
- * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
- * I/O functions). The filename passed to avformat_open_input() often does not
- * refer to an actually existing file, but has some special device-specific
- * meaning - e.g. for x11grab it is the display name.
- *
- * To use libavdevice, simply call avdevice_register_all() to register all
- * compiled muxers and demuxers. They all use standard libavformat API.
- * @}
- */
-
-#include "libavutil/log.h"
-#include "libavutil/opt.h"
-#include "libavutil/dict.h"
-#include "libavformat/avformat.h"
-
-/**
- * Return the LIBAVDEVICE_VERSION_INT constant.
- */
-unsigned avdevice_version(void);
-
-/**
- * Return the libavdevice build-time configuration.
- */
-const char *avdevice_configuration(void);
-
-/**
- * Return the libavdevice license.
- */
-const char *avdevice_license(void);
-
-/**
- * Initialize libavdevice and register all the input and output devices.
- * @warning This function is not thread safe.
- */
-void avdevice_register_all(void);
-
-/**
- * Audio input devices iterator.
- *
- * If d is NULL, returns the first registered input audio/video device,
- * if d is non-NULL, returns the next registered input audio/video device after d
- * or NULL if d is the last one.
- */
-AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
-
-/**
- * Video input devices iterator.
- *
- * If d is NULL, returns the first registered input audio/video device,
- * if d is non-NULL, returns the next registered input audio/video device after d
- * or NULL if d is the last one.
- */
-AVInputFormat *av_input_video_device_next(AVInputFormat *d);
-
-/**
- * Audio output devices iterator.
- *
- * If d is NULL, returns the first registered output audio/video device,
- * if d is non-NULL, returns the next registered output audio/video device after d
- * or NULL if d is the last one.
- */
-AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
-
-/**
- * Video output devices iterator.
- *
- * If d is NULL, returns the first registered output audio/video device,
- * if d is non-NULL, returns the next registered output audio/video device after d
- * or NULL if d is the last one.
- */
-AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
-
-typedef struct AVDeviceRect {
- int x; /**< x coordinate of top left corner */
- int y; /**< y coordinate of top left corner */
- int width; /**< width */
- int height; /**< height */
-} AVDeviceRect;
-
-/**
- * Message types used by avdevice_app_to_dev_control_message().
- */
-enum AVAppToDevMessageType {
- /**
- * Dummy message.
- */
- AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
-
- /**
- * Window size change message.
- *
- * Message is sent to the device every time the application changes the size
- * of the window device renders to.
- * Message should also be sent right after window is created.
- *
- * data: AVDeviceRect: new window size.
- */
- AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
-
- /**
- * Repaint request message.
- *
- * Message is sent to the device when window has to be repainted.
- *
- * data: AVDeviceRect: area required to be repainted.
- * NULL: whole area is required to be repainted.
- */
- AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
-
- /**
- * Request pause/play.
- *
- * Application requests pause/unpause playback.
- * Mostly usable with devices that have internal buffer.
- * By default devices are not paused.
- *
- * data: NULL
- */
- AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
- AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
- AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
-
- /**
- * Volume control message.
- *
- * Set volume level. It may be device-dependent if volume
- * is changed per stream or system wide. Per stream volume
- * change is expected when possible.
- *
- * data: double: new volume with range of 0.0 - 1.0.
- */
- AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
-
- /**
- * Mute control messages.
- *
- * Change mute state. It may be device-dependent if mute status
- * is changed per stream or system wide. Per stream mute status
- * change is expected when possible.
- *
- * data: NULL.
- */
- AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
- AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
- AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
-
- /**
- * Get volume/mute messages.
- *
- * Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
- * AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
- *
- * data: NULL.
- */
- AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
- AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
-};
-
-/**
- * Message types used by avdevice_dev_to_app_control_message().
- */
-enum AVDevToAppMessageType {
- /**
- * Dummy message.
- */
- AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
-
- /**
- * Create window buffer message.
- *
- * Device requests to create a window buffer. Exact meaning is device-
- * and application-dependent. Message is sent before rendering first
- * frame and all one-shot initializations should be done here.
- * Application is allowed to ignore preferred window buffer size.
- *
- * @note: Application is obligated to inform about window buffer size
- * with AV_APP_TO_DEV_WINDOW_SIZE message.
- *
- * data: AVDeviceRect: preferred size of the window buffer.
- * NULL: no preferred size of the window buffer.
- */
- AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
-
- /**
- * Prepare window buffer message.
- *
- * Device requests to prepare a window buffer for rendering.
- * Exact meaning is device- and application-dependent.
- * Message is sent before rendering of each frame.
- *
- * data: NULL.
- */
- AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
-
- /**
- * Display window buffer message.
- *
- * Device requests to display a window buffer.
- * Message is sent when new frame is ready to be displayed.
- * Usually buffers need to be swapped in handler of this message.
- *
- * data: NULL.
- */
- AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
-
- /**
- * Destroy window buffer message.
- *
- * Device requests to destroy a window buffer.
- * Message is sent when device is about to be destroyed and window
- * buffer is not required anymore.
- *
- * data: NULL.
- */
- AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
-
- /**
- * Buffer fullness status messages.
- *
- * Device signals buffer overflow/underflow.
- *
- * data: NULL.
- */
- AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
- AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
-
- /**
- * Buffer readable/writable.
- *
- * Device informs that buffer is readable/writable.
- * When possible, device informs how many bytes can be read/write.
- *
- * @warning The device may not inform when the number of bytes that can be read/written changes.
- *
- * data: int64_t: amount of bytes available to read/write.
- * NULL: amount of bytes available to read/write is not known.
- */
- AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
- AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
-
- /**
- * Mute state change message.
- *
- * Device informs that mute state has changed.
- *
- * data: int: 0 for not muted state, non-zero for muted state.
- */
- AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
-
- /**
- * Volume level change message.
- *
- * Device informs that volume level has changed.
- *
- * data: double: new volume with range of 0.0 - 1.0.
- */
- AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
-};
-
-/**
- * Send control message from application to device.
- *
- * @param s device context.
- * @param type message type.
- * @param data message data. Exact type depends on message type.
- * @param data_size size of message data.
- * @return >= 0 on success, negative on error.
- * AVERROR(ENOSYS) when device doesn't implement handler of the message.
- */
-int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
- enum AVAppToDevMessageType type,
- void *data, size_t data_size);
-
-/**
- * Send control message from device to application.
- *
- * @param s device context.
- * @param type message type.
- * @param data message data. Can be NULL.
- * @param data_size size of message data.
- * @return >= 0 on success, negative on error.
- * AVERROR(ENOSYS) when application doesn't implement handler of the message.
- */
-int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
- enum AVDevToAppMessageType type,
- void *data, size_t data_size);
-
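-/*
- * Usage sketch (illustrative, not part of the original header): an application
- * with an already-opened device context `oc` (the name is hypothetical) may
- * pause playback and adjust the volume like this; devices that do not handle a
- * given message return AVERROR(ENOSYS).
- *
- *   double vol = 0.5;
- *   avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_PAUSE, NULL, 0);
- *   avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_SET_VOLUME,
- *                                       &vol, sizeof(vol));
- */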
-/**
- * The following API allows the user to probe device capabilities (supported codecs,
- * pixel formats, sample formats, resolutions, channel counts, etc).
- * It is built on top of the AVOption API.
- * The queried capabilities make it possible to set up converters of video or audio
- * parameters that fit the device.
- *
- * List of capabilities that can be queried:
- * - Capabilities valid for both audio and video devices:
- * - codec: supported audio/video codecs.
- * type: AV_OPT_TYPE_INT (AVCodecID value)
- * - Capabilities valid for audio devices:
- * - sample_format: supported sample formats.
- * type: AV_OPT_TYPE_INT (AVSampleFormat value)
- * - sample_rate: supported sample rates.
- * type: AV_OPT_TYPE_INT
- * - channels: supported number of channels.
- * type: AV_OPT_TYPE_INT
- * - channel_layout: supported channel layouts.
- * type: AV_OPT_TYPE_INT64
- * - Capabilities valid for video devices:
- * - pixel_format: supported pixel formats.
- * type: AV_OPT_TYPE_INT (AVPixelFormat value)
- * - window_size: supported window sizes (describes size of the window size presented to the user).
- * type: AV_OPT_TYPE_IMAGE_SIZE
- * - frame_size: supported frame sizes (describes size of provided video frames).
- * type: AV_OPT_TYPE_IMAGE_SIZE
- * - fps: supported fps values
- * type: AV_OPT_TYPE_RATIONAL
- *
- * The value of a capability may be set by the user using the av_opt_set() function
- * on the AVDeviceCapabilitiesQuery object. Subsequent queries will
- * limit results to the values matching the already-set capabilities.
- * For example, setting a codec may impact the number of formats or fps values
- * returned during the next query. Setting an invalid value may limit results to zero.
- *
- * Example of the usage basing on opengl output device:
- *
- * @code
- * AVFormatContext *oc = NULL;
- * AVDeviceCapabilitiesQuery *caps = NULL;
- * AVOptionRanges *ranges;
- * int ret;
- *
- * if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
- * goto fail;
- * if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
- * goto fail;
- *
- * //query codecs
- * if ((ret = av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
- *     goto fail;
- * //pick codec here and set it
- * av_opt_set_int(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
- *
- * //query format
- * if ((ret = av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
- *     goto fail;
- * //pick format here and set it
- * av_opt_set_int(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
- *
- * //query and set more capabilities
- *
- * fail:
- * //clean up code
- * avdevice_capabilities_free(&caps, oc);
- * avformat_free_context(oc);
- * @endcode
- */
-
-/**
- * Structure describes device capabilities.
- *
- * It is used by devices in conjunction with av_device_capabilities AVOption table
- * to implement capabilities probing API based on AVOption API. Should not be used directly.
- */
-typedef struct AVDeviceCapabilitiesQuery {
- const AVClass *av_class;
- AVFormatContext *device_context;
- enum AVCodecID codec;
- enum AVSampleFormat sample_format;
- enum AVPixelFormat pixel_format;
- int sample_rate;
- int channels;
- int64_t channel_layout;
- int window_width;
- int window_height;
- int frame_width;
- int frame_height;
- AVRational fps;
-} AVDeviceCapabilitiesQuery;
-
-/**
- * AVOption table used by devices to implement device capabilities API. Should not be used by a user.
- */
-extern const AVOption av_device_capabilities[];
-
-/**
- * Initialize capabilities probing API based on AVOption API.
- *
- * avdevice_capabilities_free() must be called when query capabilities API is
- * not used anymore.
- *
- * @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
- * @param s Context of the device.
- * @param device_options An AVDictionary filled with device-private options.
- * On return this parameter will be destroyed and replaced with a dict
- * containing options that were not found. May be NULL.
- * The same options must be passed later to avformat_write_header() for output
- * devices or avformat_open_input() for input devices, or at any other place
- * that affects device-private options.
- *
- * @return >= 0 on success, negative otherwise.
- */
-int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
- AVDictionary **device_options);
-
-/**
- * Free resources created by avdevice_capabilities_create()
- *
- * @param caps Device capabilities data to be freed.
- * @param s Context of the device.
- */
-void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
-
-/**
- * Structure describes basic parameters of the device.
- */
-typedef struct AVDeviceInfo {
- char *device_name; /**< device name, format depends on device */
- char *device_description; /**< human friendly name */
-} AVDeviceInfo;
-
-/**
- * List of devices.
- */
-typedef struct AVDeviceInfoList {
- AVDeviceInfo **devices; /**< list of autodetected devices */
- int nb_devices; /**< number of autodetected devices */
- int default_device; /**< index of default device or -1 if no default */
-} AVDeviceInfoList;
-
-/**
- * List devices.
- *
- * Returns available device names and their parameters.
- *
- * @note: Some devices may accept system-dependent device names that cannot be
- * autodetected. The list returned by this function cannot be assumed to
- * be always complete.
- *
- * @param s device context.
- * @param[out] device_list list of autodetected devices.
- * @return count of autodetected devices, negative on error.
- */
-int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
-
-/**
- * Convenient function to free result of avdevice_list_devices().
- *
- * @param devices device list to be freed.
- */
-void avdevice_free_list_devices(AVDeviceInfoList **device_list);
-
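-/*
- * Usage sketch (illustrative, not part of the original header): enumerating the
- * devices of an already-opened device context `s` and printing their names.
- *
- *   AVDeviceInfoList *list = NULL;
- *   int i, n = avdevice_list_devices(s, &list);
- *   for (i = 0; i < n; i++)
- *       printf("%s: %s\n", list->devices[i]->device_name,
- *                          list->devices[i]->device_description);
- *   avdevice_free_list_devices(&list);
- */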
-/**
- * List devices.
- *
- * Returns available device names and their parameters.
- * These are convenient wrappers for avdevice_list_devices().
- * Device context is allocated and deallocated internally.
- *
- * @param device device format. May be NULL if device name is set.
- * @param device_name device name. May be NULL if device format is set.
- * @param device_options An AVDictionary filled with device-private options. May be NULL.
- * The same options must be passed later to avformat_write_header() for output
- * devices or avformat_open_input() for input devices, or at any other place
- * that affects device-private options.
- * @param[out] device_list list of autodetected devices
- * @return count of autodetected devices, negative on error.
- * @note device argument takes precedence over device_name when both are set.
- */
-int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
- AVDictionary *device_options, AVDeviceInfoList **device_list);
-int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
- AVDictionary *device_options, AVDeviceInfoList **device_list);
-
-#endif /* AVDEVICE_AVDEVICE_H */
diff --git a/Externals/ffmpeg/dev/include/libavdevice/version.h b/Externals/ffmpeg/dev/include/libavdevice/version.h
deleted file mode 100644
index 8de07f08b2..0000000000
--- a/Externals/ffmpeg/dev/include/libavdevice/version.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVDEVICE_VERSION_H
-#define AVDEVICE_VERSION_H
-
-/**
- * @file
- * @ingroup lavd
- * Libavdevice version macros
- */
-
-#include "libavutil/version.h"
-
-#define LIBAVDEVICE_VERSION_MAJOR 56
-#define LIBAVDEVICE_VERSION_MINOR 4
-#define LIBAVDEVICE_VERSION_MICRO 100
-
-#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
- LIBAVDEVICE_VERSION_MINOR, \
- LIBAVDEVICE_VERSION_MICRO)
-#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
- LIBAVDEVICE_VERSION_MINOR, \
- LIBAVDEVICE_VERSION_MICRO)
-#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
-
-#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
-
-/**
- * FF_API_* defines may be placed below to indicate public API that will be
- * dropped at a future version bump. The defines themselves are not part of
- * the public API and may change, break or disappear at any time.
- */
-
-#endif /* AVDEVICE_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/asrc_abuffer.h b/Externals/ffmpeg/dev/include/libavfilter/asrc_abuffer.h
deleted file mode 100644
index aa3446166f..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/asrc_abuffer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_ASRC_ABUFFER_H
-#define AVFILTER_ASRC_ABUFFER_H
-
-#include "avfilter.h"
-
-/**
- * @file
- * memory buffer source for audio
- *
- * @deprecated use buffersrc.h instead.
- */
-
-/**
- * Queue an audio buffer to the audio buffer source.
- *
- * @param abuffersrc audio source buffer context
- * @param data pointers to the samples planes
- * @param linesize linesizes of each audio buffer plane
- * @param nb_samples number of samples per channel
- * @param sample_fmt sample format of the audio data
- * @param ch_layout channel layout of the audio data
- * @param planar flag to indicate if audio data is planar or packed
- * @param pts presentation timestamp of the audio buffer
- * @param flags unused
- *
- * @deprecated use av_buffersrc_add_ref() instead.
- */
-attribute_deprecated
-int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,
- uint8_t *data[8], int linesize[8],
- int nb_samples, int sample_rate,
- int sample_fmt, int64_t ch_layout, int planar,
- int64_t pts, int av_unused flags);
-
-/**
- * Queue an audio buffer to the audio buffer source.
- *
- * This is similar to av_asrc_buffer_add_samples(), but the samples
- * are stored in a buffer with known size.
- *
- * @param abuffersrc audio source buffer context
- * @param buf pointer to the samples data, packed is assumed
- * @param size the size in bytes of the buffer, it must contain an
- * integer number of samples
- * @param sample_fmt sample format of the audio data
- * @param ch_layout channel layout of the audio data
- * @param pts presentation timestamp of the audio buffer
- * @param flags unused
- *
- * @deprecated use av_buffersrc_add_ref() instead.
- */
-attribute_deprecated
-int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,
- uint8_t *buf, int buf_size,
- int sample_rate,
- int sample_fmt, int64_t ch_layout, int planar,
- int64_t pts, int av_unused flags);
-
-/**
- * Queue an audio buffer to the audio buffer source.
- *
- * @param abuffersrc audio source buffer context
- * @param samplesref buffer ref to queue
- * @param flags unused
- *
- * @deprecated use av_buffersrc_add_ref() instead.
- */
-attribute_deprecated
-int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,
- AVFilterBufferRef *samplesref,
- int av_unused flags);
-
-#endif /* AVFILTER_ASRC_ABUFFER_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/avcodec.h b/Externals/ffmpeg/dev/include/libavfilter/avcodec.h
deleted file mode 100644
index d3d0e20e71..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/avcodec.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_AVCODEC_H
-#define AVFILTER_AVCODEC_H
-
-/**
- * @file
- * libavcodec/libavfilter gluing utilities
- *
- * This should be included in an application ONLY if the installed
- * libavfilter has been compiled with libavcodec support, otherwise
- * symbols defined below will not be available.
- */
-
-#include "avfilter.h"
-
-#if FF_API_AVFILTERBUFFER
-/**
- * Create and return a picref reference from the data and properties
- * contained in frame.
- *
- * @param perms permissions to assign to the new buffer reference
- * @deprecated avfilter APIs work natively with AVFrame instead.
- */
-attribute_deprecated
-AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);
-
-
-/**
- * Create and return a picref reference from the data and properties
- * contained in frame.
- *
- * @param perms permissions to assign to the new buffer reference
- * @deprecated avfilter APIs work natively with AVFrame instead.
- */
-attribute_deprecated
-AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
- int perms);
-
-/**
- * Create and return a buffer reference from the data and properties
- * contained in frame.
- *
- * @param perms permissions to assign to the new buffer reference
- * @deprecated avfilter APIs work natively with AVFrame instead.
- */
-attribute_deprecated
-AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
- const AVFrame *frame,
- int perms);
-#endif
-
-#endif /* AVFILTER_AVCODEC_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/avfilter.h b/Externals/ffmpeg/dev/include/libavfilter/avfilter.h
deleted file mode 100644
index b5220b96d9..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/avfilter.h
+++ /dev/null
@@ -1,1531 +0,0 @@
-/*
- * filter layer
- * Copyright (c) 2007 Bobby Bingham
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_AVFILTER_H
-#define AVFILTER_AVFILTER_H
-
-/**
- * @file
- * @ingroup lavfi
- * Main libavfilter public API header
- */
-
-/**
- * @defgroup lavfi Libavfilter - graph-based frame editing library
- * @{
- */
-
-#include
-
-#include "libavutil/attributes.h"
-#include "libavutil/avutil.h"
-#include "libavutil/dict.h"
-#include "libavutil/frame.h"
-#include "libavutil/log.h"
-#include "libavutil/samplefmt.h"
-#include "libavutil/pixfmt.h"
-#include "libavutil/rational.h"
-
-#include "libavfilter/version.h"
-
-/**
- * Return the LIBAVFILTER_VERSION_INT constant.
- */
-unsigned avfilter_version(void);
-
-/**
- * Return the libavfilter build-time configuration.
- */
-const char *avfilter_configuration(void);
-
-/**
- * Return the libavfilter license.
- */
-const char *avfilter_license(void);
-
-typedef struct AVFilterContext AVFilterContext;
-typedef struct AVFilterLink AVFilterLink;
-typedef struct AVFilterPad AVFilterPad;
-typedef struct AVFilterFormats AVFilterFormats;
-
-#if FF_API_AVFILTERBUFFER
-/**
- * A reference-counted buffer data type used by the filter system. Filters
- * should not store pointers to this structure directly, but instead use the
- * AVFilterBufferRef structure below.
- */
-typedef struct AVFilterBuffer {
- uint8_t *data[8]; ///< buffer data for each plane/channel
-
- /**
- * pointers to the data planes/channels.
- *
- * For video, this should simply point to data[].
- *
- * For planar audio, each channel has a separate data pointer, and
- * linesize[0] contains the size of each channel buffer.
- * For packed audio, there is just one data pointer, and linesize[0]
- * contains the total size of the buffer for all channels.
- *
- * Note: Both data and extended_data will always be set, but for planar
- * audio with more channels than can fit in data, extended_data must be used
- * in order to access all channels.
- */
- uint8_t **extended_data;
- int linesize[8]; ///< number of bytes per line
-
- /** private data to be used by a custom free function */
- void *priv;
- /**
- * A pointer to the function to deallocate this buffer if the default
- * function is not sufficient. This could, for example, add the memory
- * back into a memory pool to be reused later without the overhead of
- * reallocating it from scratch.
- */
- void (*free)(struct AVFilterBuffer *buf);
-
- int format; ///< media format
- int w, h; ///< width and height of the allocated buffer
- unsigned refcount; ///< number of references to this buffer
-} AVFilterBuffer;
-
-#define AV_PERM_READ 0x01 ///< can read from the buffer
-#define AV_PERM_WRITE 0x02 ///< can write to the buffer
-#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer
-#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time
-#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time
-#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes
-#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned
-
-#define AVFILTER_ALIGN 16 //not part of ABI
-
-/**
- * Audio specific properties in a reference to an AVFilterBuffer. Since
- * AVFilterBufferRef is common to different media formats, audio specific
- * per reference properties must be separated out.
- */
-typedef struct AVFilterBufferRefAudioProps {
- uint64_t channel_layout; ///< channel layout of audio buffer
- int nb_samples; ///< number of audio samples per channel
- int sample_rate; ///< audio buffer sample rate
- int channels; ///< number of channels (do not access directly)
-} AVFilterBufferRefAudioProps;
-
-/**
- * Video specific properties in a reference to an AVFilterBuffer. Since
- * AVFilterBufferRef is common to different media formats, video specific
- * per reference properties must be separated out.
- */
-typedef struct AVFilterBufferRefVideoProps {
- int w; ///< image width
- int h; ///< image height
- AVRational sample_aspect_ratio; ///< sample aspect ratio
- int interlaced; ///< is frame interlaced
- int top_field_first; ///< field order
- enum AVPictureType pict_type; ///< picture type of the frame
- int key_frame; ///< 1 -> keyframe, 0-> not
- int qp_table_linesize; ///< qp_table stride
- int qp_table_size; ///< qp_table size
- int8_t *qp_table; ///< array of Quantization Parameters
-} AVFilterBufferRefVideoProps;
-
-/**
- * A reference to an AVFilterBuffer. Since filters can manipulate the origin of
- * a buffer to, for example, crop an image without any memcpy, the buffer origin
- * and dimensions are per-reference properties. Linesize is also useful for
- * image flipping, frame to field filters, etc, and so is also per-reference.
- *
- * TODO: add anything necessary for frame reordering
- */
-typedef struct AVFilterBufferRef {
- AVFilterBuffer *buf; ///< the buffer that this is a reference to
- uint8_t *data[8]; ///< picture/audio data for each plane
- /**
- * pointers to the data planes/channels.
- *
- * For video, this should simply point to data[].
- *
- * For planar audio, each channel has a separate data pointer, and
- * linesize[0] contains the size of each channel buffer.
- * For packed audio, there is just one data pointer, and linesize[0]
- * contains the total size of the buffer for all channels.
- *
- * Note: Both data and extended_data will always be set, but for planar
- * audio with more channels than can fit in data, extended_data must be used
- * in order to access all channels.
- */
- uint8_t **extended_data;
- int linesize[8]; ///< number of bytes per line
-
- AVFilterBufferRefVideoProps *video; ///< video buffer specific properties
- AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties
-
- /**
- * presentation timestamp. The time unit may change during
- * filtering, as it is specified in the link and the filter code
- * may need to rescale the PTS accordingly.
- */
- int64_t pts;
- int64_t pos; ///< byte position in stream, -1 if unknown
-
- int format; ///< media format
-
- int perms; ///< permissions, see the AV_PERM_* flags
-
- enum AVMediaType type; ///< media type of buffer data
-
- AVDictionary *metadata; ///< dictionary containing metadata key=value tags
-} AVFilterBufferRef;
-
-/**
- * Copy properties of src to dst, without copying the actual data
- */
-attribute_deprecated
-void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, const AVFilterBufferRef *src);
-
-/**
- * Add a new reference to a buffer.
- *
- * @param ref an existing reference to the buffer
- * @param pmask a bitmask containing the allowable permissions in the new
- * reference
- * @return a new reference to the buffer with the same properties as the
- * old, excluding any permissions denied by pmask
- */
-attribute_deprecated
-AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
-
-/**
- * Remove a reference to a buffer. If this is the last reference to the
- * buffer, the buffer itself is also automatically freed.
- *
- * @param ref reference to the buffer, may be NULL
- *
- * @note it is recommended to use avfilter_unref_bufferp() instead of this
- * function
- */
-attribute_deprecated
-void avfilter_unref_buffer(AVFilterBufferRef *ref);
-
-/**
- * Remove a reference to a buffer and set the pointer to NULL.
- * If this is the last reference to the buffer, the buffer itself
- * is also automatically freed.
- *
- * @param ref pointer to the buffer reference
- */
-attribute_deprecated
-void avfilter_unref_bufferp(AVFilterBufferRef **ref);
-#endif
-
-/**
- * Get the number of channels of a buffer reference.
- */
-attribute_deprecated
-int avfilter_ref_get_channels(AVFilterBufferRef *ref);
-
-#if FF_API_AVFILTERPAD_PUBLIC
-/**
- * A filter pad used for either input or output.
- *
- * See doc/filter_design.txt for details on how to implement the methods.
- *
- * @warning this struct might be removed from public API.
- * users should call avfilter_pad_get_name() and avfilter_pad_get_type()
- * to access the name and type fields; there should be no need to access
- * any other fields from outside of libavfilter.
- */
-struct AVFilterPad {
- /**
- * Pad name. The name is unique among inputs and among outputs, but an
- * input may have the same name as an output. This may be NULL if this
- * pad has no need to ever be referenced by name.
- */
- const char *name;
-
- /**
- * AVFilterPad type.
- */
- enum AVMediaType type;
-
- /**
- * Input pads:
- * Minimum required permissions on incoming buffers. Any buffer with
- * insufficient permissions will be automatically copied by the filter
- * system to a new buffer which provides the needed access permissions.
- *
- * Output pads:
- * Guaranteed permissions on outgoing buffers. Any buffer pushed on the
- * link must have at least these permissions; this fact is checked by
- * asserts. It can be used to optimize buffer allocation.
- */
- attribute_deprecated int min_perms;
-
- /**
- * Input pads:
- * Permissions which are not accepted on incoming buffers. Any buffer
- * which has any of these permissions set will be automatically copied
- * by the filter system to a new buffer which does not have those
- * permissions. This can be used to easily disallow buffers with
- * AV_PERM_REUSE.
- *
- * Output pads:
- * Permissions which are automatically removed on outgoing buffers. It
- * can be used to optimize buffer allocation.
- */
- attribute_deprecated int rej_perms;
-
- /**
- * @deprecated unused
- */
- int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref);
-
- /**
- * Callback function to get a video buffer. If NULL, the filter system will
- * use ff_default_get_video_buffer().
- *
- * Input video pads only.
- */
- AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
-
- /**
- * Callback function to get an audio buffer. If NULL, the filter system will
- * use ff_default_get_audio_buffer().
- *
- * Input audio pads only.
- */
- AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
-
- /**
- * @deprecated unused
- */
- int (*end_frame)(AVFilterLink *link);
-
- /**
- * @deprecated unused
- */
- int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);
-
- /**
- * Filtering callback. This is where a filter receives a frame with
- * audio/video data and should do its processing.
- *
- * Input pads only.
- *
- * @return >= 0 on success, a negative AVERROR on error. This function
- * must ensure that frame is properly unreferenced on error if it
- * hasn't been passed on to another filter.
- */
- int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
-
- /**
- * Frame poll callback. This returns the number of immediately available
- * samples. It should return a positive value if the next request_frame()
- * is guaranteed to return one frame (with no delay).
- *
- * Defaults to just calling the source poll_frame() method.
- *
- * Output pads only.
- */
- int (*poll_frame)(AVFilterLink *link);
-
- /**
- * Frame request callback. A call to this should result in at least one
- * frame being output over the given link. This should return zero on
- * success, and another value on error.
- * See ff_request_frame() for the error codes with a specific
- * meaning.
- *
- * Output pads only.
- */
- int (*request_frame)(AVFilterLink *link);
-
- /**
- * Link configuration callback.
- *
- * For output pads, this should set the following link properties:
- * video: width, height, sample_aspect_ratio, time_base
- * audio: sample_rate.
- *
- * This should NOT set properties such as format, channel_layout, etc which
- * are negotiated between filters by the filter system using the
- * query_formats() callback before this function is called.
- *
- * For input pads, this should check the properties of the link, and update
- * the filter's internal state as necessary.
- *
- * For both input and output pads, this should return zero on success,
- * and another value on error.
- */
- int (*config_props)(AVFilterLink *link);
-
- /**
- * The filter expects a fifo to be inserted on its input link,
- * typically because it has a delay.
- *
- * input pads only.
- */
- int needs_fifo;
-
- /**
- * The filter expects writable frames from its input link,
- * duplicating data buffers if needed.
- *
- * input pads only.
- */
- int needs_writable;
-};
-#endif
-
-/**
- * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.
- * AVFilter.inputs/outputs).
- */
-int avfilter_pad_count(const AVFilterPad *pads);
-
-/**
- * Get the name of an AVFilterPad.
- *
- * @param pads an array of AVFilterPads
- * @param pad_idx index of the pad in the array; it is the caller's
- *                responsibility to ensure the index is valid
- *
- * @return name of the pad_idx'th pad in pads
- */
-const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx);
-
-/**
- * Get the type of an AVFilterPad.
- *
- * @param pads an array of AVFilterPads
- * @param pad_idx index of the pad in the array; it is the caller's
- * responsibility to ensure the index is valid
- *
- * @return type of the pad_idx'th pad in pads
- */
-enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);
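-
-/*
- * Illustrative sketch (an assumption-laden example, not a normative recipe):
- * enumerate the static input pads of a filter looked up by name. Assumes
- * <stdio.h>, <libavfilter/avfilter.h> and <libavutil/avutil.h> are included
- * and that a filter named "scale" is registered.
- *
- *     const AVFilter *f = avfilter_get_by_name("scale");
- *     if (f && f->inputs) {
- *         int i, n = avfilter_pad_count(f->inputs);
- *         for (i = 0; i < n; i++)
- *             printf("input pad %d: %s (%s)\n", i,
- *                    avfilter_pad_get_name(f->inputs, i),
- *                    av_get_media_type_string(avfilter_pad_get_type(f->inputs, i)));
- *     }
- */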
-
-/**
- * The number of the filter inputs is not determined just by AVFilter.inputs.
- * The filter might add additional inputs during initialization depending on the
- * options supplied to it.
- */
-#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0)
-/**
- * The number of the filter outputs is not determined just by AVFilter.outputs.
- * The filter might add additional outputs during initialization depending on
- * the options supplied to it.
- */
-#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1)
-/**
- * The filter supports multithreading by splitting frames into multiple parts
- * and processing them concurrently.
- */
-#define AVFILTER_FLAG_SLICE_THREADS (1 << 2)
-/**
- * Some filters support a generic "enable" expression option that can be used
- * to enable or disable a filter in the timeline. Filters supporting this
- * option have this flag set. When the enable expression is false, the default
- * no-op filter_frame() function is called in place of the filter_frame()
- * callback defined on each input pad, thus the frame is passed unchanged to
- * the next filters.
- */
-#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16)
-/**
- * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
- * have its filter_frame() callback(s) called as usual even when the enable
- * expression is false. The filter will disable filtering within the
- * filter_frame() callback(s) itself, for example executing code depending on
- * the AVFilterContext->is_disabled value.
- */
-#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)
-/**
- * Handy mask to test whether or not the filter supports the timeline feature
- * (internally or generically).
- */
-#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)
-
-/**
- * Filter definition. This defines the pads a filter contains, and all the
- * callback functions used to interact with the filter.
- */
-typedef struct AVFilter {
- /**
- * Filter name. Must be non-NULL and unique among filters.
- */
- const char *name;
-
- /**
- * A description of the filter. May be NULL.
- *
- * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
- */
- const char *description;
-
- /**
- * List of inputs, terminated by a zeroed element.
- *
- * NULL if there are no (static) inputs. Instances of filters with
- * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in
- * this list.
- */
- const AVFilterPad *inputs;
- /**
- * List of outputs, terminated by a zeroed element.
- *
- * NULL if there are no (static) outputs. Instances of filters with
- * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in
- * this list.
- */
- const AVFilterPad *outputs;
-
- /**
- * A class for the private data, used to declare filter private AVOptions.
- * This field is NULL for filters that do not declare any options.
- *
- * If this field is non-NULL, the first member of the filter private data
- * must be a pointer to AVClass, which will be set by libavfilter generic
- * code to this class.
- */
- const AVClass *priv_class;
-
- /**
- * A combination of AVFILTER_FLAG_*
- */
- int flags;
-
- /*****************************************************************
- * All fields below this line are not part of the public API. They
- * may not be used outside of libavfilter and can be changed and
- * removed at will.
- * New public fields should be added right above.
- *****************************************************************
- */
-
- /**
- * Filter initialization function.
- *
- * This callback will be called only once during the filter lifetime, after
- * all the options have been set, but before links between filters are
- * established and format negotiation is done.
- *
- * Basic filter initialization should be done here. Filters with dynamic
- * inputs and/or outputs should create those inputs/outputs here based on
- * provided options. No more changes to this filter's inputs/outputs can be
- * done after this callback.
- *
- * This callback must not assume that the filter links exist or frame
- * parameters are known.
- *
- * @ref AVFilter.uninit "uninit" is guaranteed to be called even if
- * initialization fails, so this callback does not have to clean up on
- * failure.
- *
- * @return 0 on success, a negative AVERROR on failure
- */
- int (*init)(AVFilterContext *ctx);
-
- /**
- * Should be set instead of @ref AVFilter.init "init" by the filters that
- * want to pass a dictionary of AVOptions to nested contexts that are
- * allocated during init.
- *
- * On return, the options dict should be freed and replaced with one that
- * contains all the options which could not be processed by this filter (or
- * with NULL if all the options were processed).
- *
- * Otherwise the semantics is the same as for @ref AVFilter.init "init".
- */
- int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);
-
- /**
- * Filter uninitialization function.
- *
- * Called only once right before the filter is freed. Should deallocate any
- * memory held by the filter, release any buffer references, etc. It does
- * not need to deallocate the AVFilterContext.priv memory itself.
- *
- * This callback may be called even if @ref AVFilter.init "init" was not
- * called or failed, so it must be prepared to handle such a situation.
- */
- void (*uninit)(AVFilterContext *ctx);
-
- /**
- * Query formats supported by the filter on its inputs and outputs.
- *
- * This callback is called after the filter is initialized (so the inputs
- * and outputs are fixed), shortly before the format negotiation. This
- * callback may be called more than once.
- *
- * This callback must set AVFilterLink.out_formats on every input link and
- * AVFilterLink.in_formats on every output link to a list of pixel/sample
- * formats that the filter supports on that link. For audio links, this
- * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" /
- * @ref AVFilterLink.out_samplerates "out_samplerates" and
- * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" /
- * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously.
- *
- * This callback may be NULL for filters with one input, in which case
- * libavfilter assumes that it supports all input formats and preserves
- * them on output.
- *
- * @return zero on success, a negative value corresponding to an
- * AVERROR code otherwise
- */
- int (*query_formats)(AVFilterContext *);
-
- int priv_size; ///< size of private data to allocate for the filter
-
- /**
- * Used by the filter registration system. Must not be touched by any other
- * code.
- */
- struct AVFilter *next;
-
- /**
- * Make the filter instance process a command.
- *
- * @param cmd the command to process; for simplicity of handling, all commands must be alphanumeric only
- * @param arg the argument for the command
- * @param res a buffer with size res_len where the filter(s) can return a response. This must not change when the command is not supported.
- * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be
- * time consuming then a filter should treat it like an unsupported command
- *
- * @returns >=0 on success otherwise an error code.
- * AVERROR(ENOSYS) on unsupported commands
- */
- int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);
-
- /**
- * Filter initialization function, alternative to the init()
- * callback. Args contains the user-supplied parameters, opaque is
- * used for providing binary data.
- */
- int (*init_opaque)(AVFilterContext *ctx, void *opaque);
-} AVFilter;
-
-/**
- * Process multiple parts of the frame concurrently.
- */
-#define AVFILTER_THREAD_SLICE (1 << 0)
-
-typedef struct AVFilterInternal AVFilterInternal;
-
-/** An instance of a filter */
-struct AVFilterContext {
- const AVClass *av_class; ///< needed for av_log() and filters common options
-
- const AVFilter *filter; ///< the AVFilter of which this is an instance
-
- char *name; ///< name of this filter instance
-
- AVFilterPad *input_pads; ///< array of input pads
- AVFilterLink **inputs; ///< array of pointers to input links
-#if FF_API_FOO_COUNT
- attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs
-#endif
- unsigned nb_inputs; ///< number of input pads
-
- AVFilterPad *output_pads; ///< array of output pads
- AVFilterLink **outputs; ///< array of pointers to output links
-#if FF_API_FOO_COUNT
- attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs
-#endif
- unsigned nb_outputs; ///< number of output pads
-
- void *priv; ///< private data for use by the filter
-
- struct AVFilterGraph *graph; ///< filtergraph this filter belongs to
-
- /**
- * Type of multithreading being allowed/used. A combination of
- * AVFILTER_THREAD_* flags.
- *
- * May be set by the caller before initializing the filter to forbid some
- * or all kinds of multithreading for this filter. The default is allowing
- * everything.
- *
- * When the filter is initialized, this field is combined using bit AND with
- * AVFilterGraph.thread_type to get the final mask used for determining
- * allowed threading types. I.e. a threading type needs to be set in both
- * to be allowed.
- *
- * After the filter is initialized, libavfilter sets this field to the
- * threading type that is actually used (0 for no multithreading).
- */
- int thread_type;
-
- /**
- * An opaque struct for libavfilter internal use.
- */
- AVFilterInternal *internal;
-
- struct AVFilterCommand *command_queue;
-
- char *enable_str; ///< enable expression string
- void *enable; ///< parsed expression (AVExpr*)
- double *var_values; ///< variable values for the enable expression
- int is_disabled; ///< the enabled state from the last expression evaluation
-};
-
-/**
- * A link between two filters. This contains pointers to the source and
- * destination filters between which this link exists, and the indexes of
- * the pads involved. In addition, this link also contains the parameters
- * which have been negotiated and agreed upon between the filters, such as
- * image dimensions, format, etc.
- */
-struct AVFilterLink {
- AVFilterContext *src; ///< source filter
- AVFilterPad *srcpad; ///< output pad on the source filter
-
- AVFilterContext *dst; ///< dest filter
- AVFilterPad *dstpad; ///< input pad on the dest filter
-
- enum AVMediaType type; ///< filter media type
-
- /* These parameters apply only to video */
- int w; ///< agreed upon image width
- int h; ///< agreed upon image height
- AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
- /* These parameters apply only to audio */
- uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h)
- int sample_rate; ///< samples per second
-
- int format; ///< agreed upon media format
-
- /**
- * Define the time base used by the PTS of the frames/samples
- * which will pass through this link.
- * During the configuration stage, each filter is supposed to
- * change only the output timebase, while the timebase of the
- * input link is assumed to be an unchangeable property.
- */
- AVRational time_base;
-
- /*****************************************************************
- * All fields below this line are not part of the public API. They
- * may not be used outside of libavfilter and can be changed and
- * removed at will.
- * New public fields should be added right above.
- *****************************************************************
- */
- /**
- * Lists of formats and channel layouts supported by the input and output
- * filters respectively. These lists are used for negotiating the format
- * to actually be used, which will be loaded into the format and
- * channel_layout members, above, when chosen.
- *
- */
- AVFilterFormats *in_formats;
- AVFilterFormats *out_formats;
-
- /**
- * Lists of channel layouts and sample rates used for automatic
- * negotiation.
- */
- AVFilterFormats *in_samplerates;
- AVFilterFormats *out_samplerates;
- struct AVFilterChannelLayouts *in_channel_layouts;
- struct AVFilterChannelLayouts *out_channel_layouts;
-
- /**
- * Audio only, the destination filter sets this to a non-zero value to
- * request that buffers with the given number of samples should be sent to
- * it. AVFilterPad.needs_fifo must also be set on the corresponding input
- * pad.
- * Last buffer before EOF will be padded with silence.
- */
- int request_samples;
-
- /** stage of the initialization of the link properties (dimensions, etc) */
- enum {
- AVLINK_UNINIT = 0, ///< not started
- AVLINK_STARTINIT, ///< started, but incomplete
- AVLINK_INIT ///< complete
- } init_state;
-
- struct AVFilterPool *pool;
-
- /**
- * Graph the filter belongs to.
- */
- struct AVFilterGraph *graph;
-
- /**
- * Current timestamp of the link, as defined by the most recent
- * frame(s), in AV_TIME_BASE units.
- */
- int64_t current_pts;
-
- /**
- * Index in the age array.
- */
- int age_index;
-
- /**
- * Frame rate of the stream on the link, or 1/0 if unknown;
- * if left to 0/0, it will automatically be copied from the first input
- * of the source filter if it exists.
- *
- * Sources should set it to the best estimation of the real frame rate.
- * Filters should update it if necessary depending on their function.
- * Sinks can use it to set a default output frame rate.
- * It is similar to the r_frame_rate field in AVStream.
- */
- AVRational frame_rate;
-
- /**
- * Buffer partially filled with samples to achieve a fixed/minimum size.
- */
- AVFrame *partial_buf;
-
- /**
- * Size of the partial buffer to allocate.
- * Must be between min_samples and max_samples.
- */
- int partial_buf_size;
-
- /**
- * Minimum number of samples to filter at once. If filter_frame() is
- * called with fewer samples, it will accumulate them in partial_buf.
- * This field and the related ones must not be changed after filtering
- * has started.
- * If 0, all related fields are ignored.
- */
- int min_samples;
-
- /**
- * Maximum number of samples to filter at once. If filter_frame() is
- * called with more samples, it will split them.
- */
- int max_samples;
-
- /**
- * The buffer reference currently being received across the link by the
- * destination filter. This is used internally by the filter system to
- * allow automatic copying of buffers which do not have sufficient
- * permissions for the destination. This should not be accessed directly
- * by the filters.
- */
- AVFilterBufferRef *cur_buf_copy;
-
- /**
- * True if the link is closed.
- * If set, all attempts of start_frame, filter_frame or request_frame
- * will fail with AVERROR_EOF, and if necessary the reference will be
- * destroyed.
- * If request_frame returns AVERROR_EOF, this flag is set on the
- * corresponding link.
- * It can also be set by either the source or the destination
- * filter.
- */
- int closed;
-
- /**
- * Number of channels.
- */
- int channels;
-
- /**
- * True if a frame is being requested on the link.
- * Used internally by the framework.
- */
- unsigned frame_requested;
-
- /**
- * Link processing flags.
- */
- unsigned flags;
-
- /**
- * Number of past frames sent through the link.
- */
- int64_t frame_count;
-};
-
-/**
- * Link two filters together.
- *
- * @param src the source filter
- * @param srcpad index of the output pad on the source filter
- * @param dst the destination filter
- * @param dstpad index of the input pad on the destination filter
- * @return zero on success
- */
-int avfilter_link(AVFilterContext *src, unsigned srcpad,
- AVFilterContext *dst, unsigned dstpad);
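-
-/*
- * Illustrative sketch (assumes two already allocated and initialized filter
- * contexts, e.g. a buffer source src_ctx and a buffer sink sink_ctx, each
- * with a single pad at index 0):
- *
- *     int err = avfilter_link(src_ctx, 0, sink_ctx, 0);
- *     if (err < 0)
- *         av_log(NULL, AV_LOG_ERROR, "Failed to link filters\n");
- */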
-
-/**
- * Free the link in *link, and set its pointer to NULL.
- */
-void avfilter_link_free(AVFilterLink **link);
-
-/**
- * Get the number of channels of a link.
- */
-int avfilter_link_get_channels(AVFilterLink *link);
-
-/**
- * Set the closed field of a link.
- */
-void avfilter_link_set_closed(AVFilterLink *link, int closed);
-
-/**
- * Negotiate the media format, dimensions, etc of all inputs to a filter.
- *
- * @param filter the filter to negotiate the properties for its inputs
- * @return zero on successful negotiation
- */
-int avfilter_config_links(AVFilterContext *filter);
-
-#if FF_API_AVFILTERBUFFER
-/**
- * Create a buffer reference wrapped around an already allocated image
- * buffer.
- *
- * @param data pointers to the planes of the image to reference
- * @param linesize linesizes for the planes of the image to reference
- * @param perms the required access permissions
- * @param w the width of the image specified by the data and linesize arrays
- * @param h the height of the image specified by the data and linesize arrays
- * @param format the pixel format of the image specified by the data and linesize arrays
- */
-attribute_deprecated
-AVFilterBufferRef *
-avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
- int w, int h, enum AVPixelFormat format);
-
-/**
- * Create an audio buffer reference wrapped around an already
- * allocated samples buffer.
- *
- * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version
- * that can handle unknown channel layouts.
- *
- * @param data pointers to the samples plane buffers
- * @param linesize linesize for the samples plane buffers
- * @param perms the required access permissions
- * @param nb_samples number of samples per channel
- * @param sample_fmt the format of each sample in the buffer to allocate
- * @param channel_layout the channel layout of the buffer
- */
-attribute_deprecated
-AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
- int linesize,
- int perms,
- int nb_samples,
- enum AVSampleFormat sample_fmt,
- uint64_t channel_layout);
-/**
- * Create an audio buffer reference wrapped around an already
- * allocated samples buffer.
- *
- * @param data pointers to the samples plane buffers
- * @param linesize linesize for the samples plane buffers
- * @param perms the required access permissions
- * @param nb_samples number of samples per channel
- * @param sample_fmt the format of each sample in the buffer to allocate
- * @param channels the number of channels of the buffer
- * @param channel_layout the channel layout of the buffer,
- * must be either 0 or consistent with channels
- */
-attribute_deprecated
-AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
- int linesize,
- int perms,
- int nb_samples,
- enum AVSampleFormat sample_fmt,
- int channels,
- uint64_t channel_layout);
-
-#endif
-
-
-#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all, for example); fast filters are favored automatically
-#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute the command when it is fast (like a video out that supports contrast adjustment in hardware)
-
-/**
- * Make the filter instance process a command.
- * It is recommended to use avfilter_graph_send_command().
- */
-int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);
-
-/** Initialize the filter system. Register all builtin filters. */
-void avfilter_register_all(void);
-
-#if FF_API_OLD_FILTER_REGISTER
-/** Uninitialize the filter system. Unregister all filters. */
-attribute_deprecated
-void avfilter_uninit(void);
-#endif
-
-/**
- * Register a filter. This is only needed if you plan to use
- * avfilter_get_by_name later to lookup the AVFilter structure by name. A
- * filter can still be instantiated with avfilter_graph_alloc_filter even if it
- * is not registered.
- *
- * @param filter the filter to register
- * @return 0 if the registration was successful, a negative value
- * otherwise
- */
-int avfilter_register(AVFilter *filter);
-
-/**
- * Get a filter definition matching the given name.
- *
- * @param name the filter name to find
- * @return the filter definition, if any matching one is registered.
- * NULL if none found.
- */
-#if !FF_API_NOCONST_GET_NAME
-const
-#endif
-AVFilter *avfilter_get_by_name(const char *name);
-
-/**
- * Iterate over all registered filters.
- * @return If prev is non-NULL, next registered filter after prev or NULL if
- * prev is the last filter. If prev is NULL, return the first registered filter.
- */
-const AVFilter *avfilter_next(const AVFilter *prev);
-
-#if FF_API_OLD_FILTER_REGISTER
-/**
- * If filter is NULL, returns a pointer to the first registered filter pointer,
- * if filter is non-NULL, returns the next pointer after filter.
- * If the returned pointer points to NULL, the last registered filter
- * was already reached.
- * @deprecated use avfilter_next()
- */
-attribute_deprecated
-AVFilter **av_filter_next(AVFilter **filter);
-#endif
-
-#if FF_API_AVFILTER_OPEN
-/**
- * Create a filter instance.
- *
- * @param filter_ctx put here a pointer to the created filter context
- * on success, NULL on failure
- * @param filter the filter to create an instance of
- * @param inst_name Name to give to the new instance. Can be NULL for none.
- * @return >= 0 in case of success, a negative error code otherwise
- * @deprecated use avfilter_graph_alloc_filter() instead
- */
-attribute_deprecated
-int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);
-#endif
-
-
-#if FF_API_AVFILTER_INIT_FILTER
-/**
- * Initialize a filter.
- *
- * @param filter the filter to initialize
- * @param args A string of parameters to use when initializing the filter.
- * The format and meaning of this string varies by filter.
- * @param opaque Any extra non-string data needed by the filter. The meaning
- * of this parameter varies by filter.
- * @return zero on success
- */
-attribute_deprecated
-int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque);
-#endif
-
-/**
- * Initialize a filter with the supplied parameters.
- *
- * @param ctx uninitialized filter context to initialize
- * @param args Options to initialize the filter with. This must be a
- * ':'-separated list of options in the 'key=value' form.
- * May be NULL if the options have been set directly using the
- * AVOptions API or there are no options that need to be set.
- * @return 0 on success, a negative AVERROR on failure
- */
-int avfilter_init_str(AVFilterContext *ctx, const char *args);
-
-/**
- * Initialize a filter with the supplied dictionary of options.
- *
- * @param ctx uninitialized filter context to initialize
- * @param options An AVDictionary filled with options for this filter. On
- * return this parameter will be destroyed and replaced with
- * a dict containing options that were not found. This dictionary
- * must be freed by the caller.
- * May be NULL, then this function is equivalent to
- * avfilter_init_str() with the second parameter set to NULL.
- * @return 0 on success, a negative AVERROR on failure
- *
- * @note This function and avfilter_init_str() do essentially the same thing,
- * the difference is in the manner in which the options are passed. It is up to the
- * calling code to choose whichever is more preferable. The two functions also
- * behave differently when some of the provided options are not declared as
- * supported by the filter. In such a case, avfilter_init_str() will fail, but
- * this function will leave those extra options in the options AVDictionary and
- * continue as usual.
- */
-int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options);
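-
-/*
- * Illustrative sketch of the AVDictionary-based initialization path (assumes
- * an existing AVFilterGraph *graph, a registered "scale" filter, and
- * <libavutil/dict.h>; error handling abbreviated):
- *
- *     AVDictionary *opts = NULL;
- *     AVFilterContext *ctx =
- *         avfilter_graph_alloc_filter(graph, avfilter_get_by_name("scale"), "my_scale");
- *     int err;
- *
- *     av_dict_set(&opts, "width",  "640", 0);
- *     av_dict_set(&opts, "height", "480", 0);
- *     err = avfilter_init_dict(ctx, &opts);
- *     if (err >= 0 && av_dict_count(opts))
- *         av_log(NULL, AV_LOG_WARNING, "some options were not consumed\n");
- *     av_dict_free(&opts);
- */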
-
-/**
- * Free a filter context. This will also remove the filter from its
- * filtergraph's list of filters.
- *
- * @param filter the filter to free
- */
-void avfilter_free(AVFilterContext *filter);
-
-/**
- * Insert a filter in the middle of an existing link.
- *
- * @param link the link into which the filter should be inserted
- * @param filt the filter to be inserted
- * @param filt_srcpad_idx the input pad on the filter to connect
- * @param filt_dstpad_idx the output pad on the filter to connect
- * @return zero on success
- */
-int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
- unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);
-
-#if FF_API_AVFILTERBUFFER
-/**
- * Copy the frame properties of src to dst, without copying the actual
- * image data.
- *
- * @return 0 on success, a negative number on error.
- */
-attribute_deprecated
-int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
-
-/**
- * Copy the frame properties and data pointers of src to dst, without copying
- * the actual data.
- *
- * @return 0 on success, a negative number on error.
- */
-attribute_deprecated
-int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
-#endif
-
-/**
- * @return AVClass for AVFilterContext.
- *
- * @see av_opt_find().
- */
-const AVClass *avfilter_get_class(void);
-
-typedef struct AVFilterGraphInternal AVFilterGraphInternal;
-
-/**
- * A function pointer passed to the @ref AVFilterGraph.execute callback to be
- * executed multiple times, possibly in parallel.
- *
- * @param ctx the filter context the job belongs to
- * @param arg an opaque parameter passed through from @ref
- * AVFilterGraph.execute
- * @param jobnr the index of the job being executed
- * @param nb_jobs the total number of jobs
- *
- * @return 0 on success, a negative AVERROR on error
- */
-typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
-
-/**
- * A function executing multiple jobs, possibly in parallel.
- *
- * @param ctx the filter context to which the jobs belong
- * @param func the function to be called multiple times
- * @param arg the argument to be passed to func
- * @param ret a nb_jobs-sized array to be filled with return values from each
- * invocation of func
- * @param nb_jobs the number of jobs to execute
- *
- * @return 0 on success, a negative AVERROR on error
- */
-typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func,
- void *arg, int *ret, int nb_jobs);
-
-typedef struct AVFilterGraph {
- const AVClass *av_class;
-#if FF_API_FOO_COUNT
- attribute_deprecated
- unsigned filter_count_unused;
-#endif
- AVFilterContext **filters;
-#if !FF_API_FOO_COUNT
- unsigned nb_filters;
-#endif
-
- char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
- char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
-#if FF_API_FOO_COUNT
- unsigned nb_filters;
-#endif
-
- /**
- * Type of multithreading allowed for filters in this graph. A combination
- * of AVFILTER_THREAD_* flags.
- *
- * May be set by the caller at any point, the setting will apply to all
- * filters initialized after that. The default is allowing everything.
- *
- * When a filter in this graph is initialized, this field is combined using
- * bit AND with AVFilterContext.thread_type to get the final mask used for
- * determining allowed threading types. I.e. a threading type needs to be
- * set in both to be allowed.
- */
- int thread_type;
-
- /**
- * Maximum number of threads used by filters in this graph. May be set by
- * the caller before adding any filters to the filtergraph. Zero (the
- * default) means that the number of threads is determined automatically.
- */
- int nb_threads;
-
- /**
- * Opaque object for libavfilter internal use.
- */
- AVFilterGraphInternal *internal;
-
- /**
- * Opaque user data. May be set by the caller to an arbitrary value, e.g. to
- * be used from callbacks like @ref AVFilterGraph.execute.
- * Libavfilter will not touch this field in any way.
- */
- void *opaque;
-
- /**
- * This callback may be set by the caller immediately after allocating the
- * graph and before adding any filters to it, to provide a custom
- * multithreading implementation.
- *
- * If set, filters with slice threading capability will call this callback
- * to execute multiple jobs in parallel.
- *
- * If this field is left unset, libavfilter will use its internal
- * implementation, which may or may not be multithreaded depending on the
- * platform and build options.
- */
- avfilter_execute_func *execute;
-
- char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions
-
- /**
- * Private fields
- *
- * The following fields are for internal use only.
- * Their type, offset, number and semantic can change without notice.
- */
-
- AVFilterLink **sink_links;
- int sink_links_count;
-
- unsigned disable_auto_convert;
-} AVFilterGraph;
-
-/**
- * Allocate a filter graph.
- */
-AVFilterGraph *avfilter_graph_alloc(void);
-
-/**
- * Create a new filter instance in a filter graph.
- *
- * @param graph graph in which the new filter will be used
- * @param filter the filter to create an instance of
- * @param name Name to give to the new instance (will be copied to
- * AVFilterContext.name). This may be used by the caller to identify
- * different filters, libavfilter itself assigns no semantics to
- * this parameter. May be NULL.
- *
- * @return the context of the newly created filter instance (note that it is
- * also retrievable directly through AVFilterGraph.filters or with
- * avfilter_graph_get_filter()) on success or NULL on failure.
- */
-AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
- const AVFilter *filter,
- const char *name);
-
-/**
- * Get a filter instance identified by instance name from graph.
- *
- * @param graph filter graph to search through.
- * @param name filter instance name (should be unique in the graph).
- * @return the pointer to the found filter instance or NULL if it
- * cannot be found.
- */
-AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);
-
-#if FF_API_AVFILTER_OPEN
-/**
- * Add an existing filter instance to a filter graph.
- *
- * @param graphctx the filter graph
- * @param filter the filter to be added
- *
- * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a
- * filter graph
- */
-attribute_deprecated
-int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);
-#endif
-
-/**
- * Create and add a filter instance into an existing graph.
- * The filter instance is created from the filter filt and initialized
- * with the parameters args and opaque.
- *
- * In case of success put in *filt_ctx the pointer to the created
- * filter instance, otherwise set *filt_ctx to NULL.
- *
- * @param name the instance name to give to the created filter instance
- * @param graph_ctx the filter graph
- * @return a negative AVERROR error code in case of failure, a
- * non-negative value otherwise
- */
-int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
- const char *name, const char *args, void *opaque,
- AVFilterGraph *graph_ctx);
-
-/**
- * Enable or disable automatic format conversion inside the graph.
- *
- * Note that format conversion can still happen inside explicitly inserted
- * scale and aresample filters.
- *
- * @param flags any of the AVFILTER_AUTO_CONVERT_* constants
- */
-void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
-
-enum {
- AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
- AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
-};
-
-/**
- * Check validity and configure all the links and formats in the graph.
- *
- * @param graphctx the filter graph
- * @param log_ctx context used for logging
- * @return >= 0 in case of success, a negative AVERROR code otherwise
- */
-int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
-
-/**
- * Free a graph, destroy its links, and set *graph to NULL.
- * If *graph is NULL, do nothing.
- */
-void avfilter_graph_free(AVFilterGraph **graph);
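-
-/*
- * Illustrative end-to-end sketch of the allocate / build / configure / free
- * lifecycle for a tiny video graph (buffer source feeding a buffer sink).
- * The buffer source arguments are assumptions made for the example and error
- * handling is abbreviated:
- *
- *     AVFilterGraph *graph = avfilter_graph_alloc();
- *     AVFilterContext *src = NULL, *sink = NULL;
- *
- *     avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"), "in",
- *                                  "video_size=320x240:pix_fmt=yuv420p:"
- *                                  "time_base=1/25:pixel_aspect=1/1",
- *                                  NULL, graph);
- *     avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
- *                                  "out", NULL, NULL, graph);
- *     avfilter_link(src, 0, sink, 0);
- *     if (avfilter_graph_config(graph, NULL) < 0)
- *         av_log(NULL, AV_LOG_ERROR, "invalid graph\n");
- *     // ... feed frames with av_buffersrc_*, drain with av_buffersink_* ...
- *     avfilter_graph_free(&graph);
- */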
-
-/**
- * A linked-list of the inputs/outputs of the filter chain.
- *
- * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
- * where it is used to communicate open (unlinked) inputs and outputs from and
- * to the caller.
- * This struct specifies, per each not connected pad contained in the graph, the
- * filter context and the pad index required for establishing a link.
- */
-typedef struct AVFilterInOut {
- /** unique name for this input/output in the list */
- char *name;
-
- /** filter context associated to this input/output */
- AVFilterContext *filter_ctx;
-
- /** index of the filt_ctx pad to use for linking */
- int pad_idx;
-
- /** next input/output in the list, NULL if this is the last */
- struct AVFilterInOut *next;
-} AVFilterInOut;
-
-/**
- * Allocate a single AVFilterInOut entry.
- * Must be freed with avfilter_inout_free().
- * @return allocated AVFilterInOut on success, NULL on failure.
- */
-AVFilterInOut *avfilter_inout_alloc(void);
-
-/**
- * Free the supplied list of AVFilterInOut and set *inout to NULL.
- * If *inout is NULL, do nothing.
- */
-void avfilter_inout_free(AVFilterInOut **inout);
-
-#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
-/**
- * Add a graph described by a string to a graph.
- *
- * @note The caller must provide the lists of inputs and outputs,
- * which therefore must be known before calling the function.
- *
- * @note The inputs parameter describes inputs of the already existing
- * part of the graph; i.e. from the point of view of the newly created
- * part, they are outputs. Similarly the outputs parameter describes
- * outputs of the already existing filters, which are provided as
- * inputs to the parsed filters.
- *
- * @param graph the filter graph where to link the parsed graph context
- * @param filters string to be parsed
- * @param inputs linked list to the inputs of the graph
- * @param outputs linked list to the outputs of the graph
- * @return zero on success, a negative AVERROR code on error
- */
-int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
- AVFilterInOut *inputs, AVFilterInOut *outputs,
- void *log_ctx);
-#else
-/**
- * Add a graph described by a string to a graph.
- *
- * @param graph the filter graph where to link the parsed graph context
- * @param filters string to be parsed
- * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
- * If non-NULL, *inputs is updated to contain the list of open inputs
- * after the parsing, should be freed with avfilter_inout_free().
- * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
- * If non-NULL, *outputs is updated to contain the list of open outputs
- * after the parsing, should be freed with avfilter_inout_free().
- * @return non negative on success, a negative AVERROR code on error
- * @deprecated Use avfilter_graph_parse_ptr() instead.
- */
-attribute_deprecated
-int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
- AVFilterInOut **inputs, AVFilterInOut **outputs,
- void *log_ctx);
-#endif
-
-/**
- * Add a graph described by a string to a graph.
- *
- * @param graph the filter graph where to link the parsed graph context
- * @param filters string to be parsed
- * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
- * If non-NULL, *inputs is updated to contain the list of open inputs
- * after the parsing, should be freed with avfilter_inout_free().
- * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
- * If non-NULL, *outputs is updated to contain the list of open outputs
- * after the parsing, should be freed with avfilter_inout_free().
- * @return non negative on success, a negative AVERROR code on error
- */
-int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
- AVFilterInOut **inputs, AVFilterInOut **outputs,
- void *log_ctx);
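-
-/*
- * Illustrative sketch (assumes a buffer source src_ctx and a buffer sink
- * sink_ctx already created in graph; the filter description string is only
- * an example):
- *
- *     AVFilterInOut *outputs = avfilter_inout_alloc(); // open output of src_ctx
- *     AVFilterInOut *inputs  = avfilter_inout_alloc(); // open input of sink_ctx
- *     int err;
- *
- *     outputs->name       = av_strdup("in");
- *     outputs->filter_ctx = src_ctx;
- *     outputs->pad_idx    = 0;
- *     outputs->next       = NULL;
- *
- *     inputs->name        = av_strdup("out");
- *     inputs->filter_ctx  = sink_ctx;
- *     inputs->pad_idx     = 0;
- *     inputs->next        = NULL;
- *
- *     err = avfilter_graph_parse_ptr(graph, "scale=640:480,hflip",
- *                                    &inputs, &outputs, NULL);
- *     avfilter_inout_free(&inputs);
- *     avfilter_inout_free(&outputs);
- */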
-
-/**
- * Add a graph described by a string to a graph.
- *
- * @param[in] graph the filter graph where to link the parsed graph context
- * @param[in] filters string to be parsed
- * @param[out] inputs a linked list of all free (unlinked) inputs of the
- * parsed graph will be returned here. It is to be freed
- * by the caller using avfilter_inout_free().
- * @param[out] outputs a linked list of all free (unlinked) outputs of the
- * parsed graph will be returned here. It is to be freed by the
- * caller using avfilter_inout_free().
- * @return zero on success, a negative AVERROR code on error
- *
- * @note This function returns the inputs and outputs that are left
- * unlinked after parsing the graph and the caller then deals with
- * them.
- * @note This function makes no reference whatsoever to already
- * existing parts of the graph and the inputs parameter will on return
- * contain inputs of the newly parsed part of the graph. Analogously
- * the outputs parameter will contain outputs of the newly created
- * filters.
- */
-int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
- AVFilterInOut **inputs,
- AVFilterInOut **outputs);
-
-/**
- * Send a command to one or more filter instances.
- *
- * @param graph the filter graph
- * @param target the filter(s) to which the command should be sent
- * "all" sends to all filters
- * otherwise it can be a filter or filter instance name
- * which will send the command to all matching filters.
- * @param cmd the command to send; for simplicity of handling, all commands must be alphanumeric only
- * @param arg the argument for the command
- * @param res a buffer with size res_len where the filter(s) can return a response.
- *
- * @returns >=0 on success otherwise an error code.
- * AVERROR(ENOSYS) on unsupported commands
- */
-int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
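-
-/*
- * Illustrative sketch (assumes the graph contains a filter instance named
- * "vol" that accepts a "volume" command; the target, command and argument
- * values are only examples):
- *
- *     char res[128] = { 0 };
- *     int err = avfilter_graph_send_command(graph, "vol", "volume", "0.5",
- *                                           res, sizeof(res), 0);
- *     if (err == AVERROR(ENOSYS))
- *         av_log(NULL, AV_LOG_WARNING, "command not supported\n");
- */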
-
-/**
- * Queue a command for one or more filter instances.
- *
- * @param graph the filter graph
- * @param target the filter(s) to which the command should be sent
- * "all" sends to all filters
- * otherwise it can be a filter or filter instance name
- * which will send the command to all matching filters.
- * @param cmd the command to send; for simplicity of handling, all commands must be alphanumeric only
- * @param arg the argument for the command
- * @param ts time at which the command should be sent to the filter
- *
- * @note As this executes commands after this function returns, no return code
- * from the filter is provided, and AVFILTER_CMD_FLAG_ONE is not supported.
- */
-int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
-
-
-/**
- * Dump a graph into a human-readable string representation.
- *
- * @param graph the graph to dump
- * @param options formatting options; currently ignored
- * @return a string, or NULL in case of memory allocation failure;
- * the string must be freed using av_free
- */
-char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
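-
-/*
- * Illustrative sketch: dump a configured graph for debugging and release the
- * returned string with av_free().
- *
- *     char *dump = avfilter_graph_dump(graph, NULL);
- *     if (dump) {
- *         av_log(NULL, AV_LOG_INFO, "%s\n", dump);
- *         av_free(dump);
- *     }
- */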
-
-/**
- * Request a frame on the oldest sink link.
- *
- * If the request returns AVERROR_EOF, try the next.
- *
- * Note that this function is not meant to be the sole scheduling mechanism
- * of a filtergraph, only a convenience function to help drain a filtergraph
- * in a balanced way under normal circumstances.
- *
- * Also note that AVERROR_EOF does not mean that frames did not arrive on
- * some of the sinks during the process.
- * When there are multiple sink links, in case the requested link
- * returns an EOF, this may cause a filter to flush pending frames
- * which are sent to another sink link, although unrequested.
- *
- * @return the return value of ff_request_frame(),
- * or AVERROR_EOF if all links returned AVERROR_EOF
- */
-int avfilter_graph_request_oldest(AVFilterGraph *graph);
-
-/**
- * @}
- */
-
-#endif /* AVFILTER_AVFILTER_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/avfiltergraph.h b/Externals/ffmpeg/dev/include/libavfilter/avfiltergraph.h
deleted file mode 100644
index b31d581ca0..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/avfiltergraph.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Filter graphs
- * copyright (c) 2007 Bobby Bingham
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_AVFILTERGRAPH_H
-#define AVFILTER_AVFILTERGRAPH_H
-
-#include "avfilter.h"
-#include "libavutil/log.h"
-
-#endif /* AVFILTER_AVFILTERGRAPH_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/buffersink.h b/Externals/ffmpeg/dev/include/libavfilter/buffersink.h
deleted file mode 100644
index 24cd2feac7..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/buffersink.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_BUFFERSINK_H
-#define AVFILTER_BUFFERSINK_H
-
-/**
- * @file
- * @ingroup lavfi_buffersink
- * memory buffer sink API for audio and video
- */
-
-#include "avfilter.h"
-
-/**
- * @defgroup lavfi_buffersink Buffer sink API
- * @ingroup lavfi
- * @{
- */
-
-#if FF_API_AVFILTERBUFFER
-/**
- * Get an audio/video buffer data from buffer_sink and put it in bufref.
- *
- * This function works with both audio and video buffer sinks.
- *
- * @param buffer_sink pointer to a buffersink or abuffersink context
- * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
- * @return >= 0 in case of success, a negative AVERROR code in case of
- * failure
- */
-attribute_deprecated
-int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
- AVFilterBufferRef **bufref, int flags);
-
-/**
- * Get the number of immediately available frames.
- */
-attribute_deprecated
-int av_buffersink_poll_frame(AVFilterContext *ctx);
-
-/**
- * Get a buffer with filtered data from sink and put it in buf.
- *
- * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
- * @param buf the buffer reference will be written here if buf is non-NULL; it
- * must be freed by the caller using avfilter_unref_buffer().
- * Buf may also be NULL to query whether a buffer is ready to be
- * output.
- *
- * @return >= 0 in case of success, a negative AVERROR code in case of
- * failure.
- */
-attribute_deprecated
-int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
-
-/**
- * Same as av_buffersink_read, but with the ability to specify the number of
- * samples read. This function is less efficient than av_buffersink_read(),
- * because it copies the data around.
- *
- * @param ctx pointer to a context of the abuffersink AVFilter.
- * @param buf the buffer reference will be written here if buf is non-NULL; it
- * must be freed by the caller using avfilter_unref_buffer(). buf
- * will contain exactly nb_samples audio samples, except at the end
- * of stream, when it can contain less than nb_samples.
- * Buf may also be NULL to query whether a buffer is ready to be
- * output.
- *
- * @warning do not mix this function with av_buffersink_read(). Use only one or
- * the other with a single sink, not both.
- */
-attribute_deprecated
-int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
- int nb_samples);
-#endif
-
-/**
- * Get a frame with filtered data from sink and put it in frame.
- *
- * @param ctx pointer to a buffersink or abuffersink filter context.
- * @param frame pointer to an allocated frame that will be filled with data.
- * The data must be freed using av_frame_unref() / av_frame_free()
- * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
- *
- * @return >= 0 in for success, a negative AVERROR code for failure.
- */
-int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
-
-/**
- * Tell av_buffersink_get_buffer_ref() to read video/samples buffer
- * reference, but not remove it from the buffer. This is useful if you
- * need only to read a video/samples buffer, without fetching it.
- */
-#define AV_BUFFERSINK_FLAG_PEEK 1
-
-/**
- * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
- * If a frame is already buffered, it is read (and removed from the buffer),
- * but if no frame is present, return AVERROR(EAGAIN).
- */
-#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
-
-/**
- * Struct to use for initializing a buffersink context.
- */
-typedef struct {
- const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
-} AVBufferSinkParams;
-
-/**
- * Create an AVBufferSinkParams structure.
- *
- * Must be freed with av_free().
- */
-AVBufferSinkParams *av_buffersink_params_alloc(void);
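-
-/*
- * Illustrative sketch of constraining a video buffer sink through the
- * parameter struct (the pixel format list is an assumption for the example;
- * the struct is passed as the opaque argument of
- * avfilter_graph_create_filter()):
- *
- *     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P,
- *                                                    AV_PIX_FMT_NONE };
- *     AVBufferSinkParams *params = av_buffersink_params_alloc();
- *     AVFilterContext *sink_ctx = NULL;
- *
- *     params->pixel_fmts = pix_fmts;
- *     avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("buffersink"),
- *                                  "out", NULL, params, graph);
- *     av_free(params);
- */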
-
-/**
- * Struct to use for initializing an abuffersink context.
- */
-typedef struct {
- const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
- const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
- const int *channel_counts; ///< list of allowed channel counts, terminated by -1
- int all_channel_counts; ///< if not 0, accept any channel count or layout
- int *sample_rates; ///< list of allowed sample rates, terminated by -1
-} AVABufferSinkParams;
-
-/**
- * Create an AVABufferSinkParams structure.
- *
- * Must be freed with av_free().
- */
-AVABufferSinkParams *av_abuffersink_params_alloc(void);
-
-/**
- * Set the frame size for an audio buffer sink.
- *
- * All calls to av_buffersink_get_buffer_ref will return a buffer with
- * exactly the specified number of samples, or AVERROR(EAGAIN) if there is
- * not enough. The last buffer at EOF will be padded with 0.
- */
-void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
-
-/**
- * Get the frame rate of the input.
- */
-AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
-
-/**
- * Get a frame with filtered data from sink and put it in frame.
- *
- * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
- * @param frame pointer to an allocated frame that will be filled with data.
- * The data must be freed using av_frame_unref() / av_frame_free()
- *
- * @return
- * - >= 0 if a frame was successfully returned.
- * - AVERROR(EAGAIN) if no frames are available at this point; more
- * input frames must be added to the filtergraph to get more output.
- * - AVERROR_EOF if there will be no more output frames on this sink.
- * - A different negative AVERROR code in other failure cases.
- */
-int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
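-
-/*
- * Illustrative drain loop (assumes a configured graph with a buffer sink
- * context sink_ctx and that <libavutil/frame.h> is included):
- *
- *     AVFrame *frame = av_frame_alloc();
- *
- *     for (;;) {
- *         int err = av_buffersink_get_frame(sink_ctx, frame);
- *         if (err == AVERROR(EAGAIN)) break; // feed more input first
- *         if (err == AVERROR_EOF)     break; // no more output will arrive
- *         if (err < 0)                break; // a real error
- *         // ... consume the frame ...
- *         av_frame_unref(frame);
- *     }
- *     av_frame_free(&frame);
- */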
-
-/**
- * Same as av_buffersink_get_frame(), but with the ability to specify the number
- * of samples read. This function is less efficient than
- * av_buffersink_get_frame(), because it copies the data around.
- *
- * @param ctx pointer to a context of the abuffersink AVFilter.
- * @param frame pointer to an allocated frame that will be filled with data.
- * The data must be freed using av_frame_unref() / av_frame_free()
- * frame will contain exactly nb_samples audio samples, except at
- * the end of stream, when it can contain less than nb_samples.
- *
- * @return The return codes have the same meaning as for
- * av_buffersink_get_frame().
- *
- * @warning do not mix this function with av_buffersink_get_frame(). Use only one or
- * the other with a single sink, not both.
- */
-int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
-
-/**
- * @}
- */
-
-#endif /* AVFILTER_BUFFERSINK_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/buffersrc.h b/Externals/ffmpeg/dev/include/libavfilter/buffersrc.h
deleted file mode 100644
index ea34c04ee9..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/buffersrc.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_BUFFERSRC_H
-#define AVFILTER_BUFFERSRC_H
-
-/**
- * @file
- * @ingroup lavfi_buffersrc
- * Memory buffer source API.
- */
-
-#include "libavcodec/avcodec.h"
-#include "avfilter.h"
-
-/**
- * @defgroup lavfi_buffersrc Buffer source API
- * @ingroup lavfi
- * @{
- */
-
-enum {
-
- /**
- * Do not check for format changes.
- */
- AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
-
-#if FF_API_AVFILTERBUFFER
- /**
- * Ignored
- */
- AV_BUFFERSRC_FLAG_NO_COPY = 2,
-#endif
-
- /**
- * Immediately push the frame to the output.
- */
- AV_BUFFERSRC_FLAG_PUSH = 4,
-
- /**
- * Keep a reference to the frame.
- * If the frame is reference-counted, create a new reference; otherwise
- * copy the frame data.
- */
- AV_BUFFERSRC_FLAG_KEEP_REF = 8,
-
-};
-
-/**
- * Add buffer data in picref to buffer_src.
- *
- * @param buffer_src pointer to a buffer source context
- * @param picref a buffer reference, or NULL to mark EOF
- * @param flags a combination of AV_BUFFERSRC_FLAG_*
- * @return >= 0 in case of success, a negative AVERROR code
- * in case of failure
- */
-int av_buffersrc_add_ref(AVFilterContext *buffer_src,
- AVFilterBufferRef *picref, int flags);
-
-/**
- * Get the number of failed requests.
- *
- * A failed request is when the request_frame method is called while no
- * frame is present in the buffer.
- * The number is reset when a frame is added.
- */
-unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
-
-#if FF_API_AVFILTERBUFFER
-/**
- * Add a buffer to a filtergraph.
- *
- * @param ctx an instance of the buffersrc filter
- * @param buf buffer containing frame data to be passed down the filtergraph.
- * This function will take ownership of buf, the user must not free it.
- * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
- *
- * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()
- */
-attribute_deprecated
-int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf);
-#endif
-
-/**
- * Add a frame to the buffer source.
- *
- * @param ctx an instance of the buffersrc filter
- * @param frame frame to be added. If the frame is reference counted, this
- * function will make a new reference to it. Otherwise the frame data will be
- * copied.
- *
- * @return 0 on success, a negative AVERROR on error
- *
- * This function is equivalent to av_buffersrc_add_frame_flags() with the
- * AV_BUFFERSRC_FLAG_KEEP_REF flag.
- */
-int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
-
-/**
- * Add a frame to the buffer source.
- *
- * @param ctx an instance of the buffersrc filter
- * @param frame frame to be added. If the frame is reference counted, this
- * function will take ownership of the reference(s) and reset the frame.
- * Otherwise the frame data will be copied. If this function returns an error,
- * the input frame is not touched.
- *
- * @return 0 on success, a negative AVERROR on error.
- *
- * @note the difference between this function and av_buffersrc_write_frame() is
- * that av_buffersrc_write_frame() creates a new reference to the input frame,
- * while this function takes ownership of the reference passed to it.
- *
- * This function is equivalent to av_buffersrc_add_frame_flags() without the
- * AV_BUFFERSRC_FLAG_KEEP_REF flag.
- */
-int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
-
-/**
- * Add a frame to the buffer source.
- *
- * By default, if the frame is reference-counted, this function will take
- * ownership of the reference(s) and reset the frame. This can be controlled
- * using the flags.
- *
- * If this function returns an error, the input frame is not touched.
- *
- * @param buffer_src pointer to a buffer source context
- * @param frame a frame, or NULL to mark EOF
- * @param flags a combination of AV_BUFFERSRC_FLAG_*
- * @return >= 0 in case of success, a negative AVERROR code
- * in case of failure
- */
-int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
- AVFrame *frame, int flags);
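-
-/*
- * Illustrative sketch (assumes a buffer source context src_ctx and a decoded
- * AVFrame *frame):
- *
- *     // push a frame, keeping the caller's reference intact
- *     int err = av_buffersrc_add_frame_flags(src_ctx, frame,
- *                                            AV_BUFFERSRC_FLAG_KEEP_REF);
- *
- *     // later, when no more frames will be sent, signal EOF
- *     av_buffersrc_add_frame_flags(src_ctx, NULL, 0);
- */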
-
-
-/**
- * @}
- */
-
-#endif /* AVFILTER_BUFFERSRC_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/version.h b/Externals/ffmpeg/dev/include/libavfilter/version.h
deleted file mode 100644
index 383eb55ef4..0000000000
--- a/Externals/ffmpeg/dev/include/libavfilter/version.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Version macros.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_VERSION_H
-#define AVFILTER_VERSION_H
-
-/**
- * @file
- * @ingroup lavfi
- * Libavfilter version macros
- */
-
-#include "libavutil/version.h"
-
-#define LIBAVFILTER_VERSION_MAJOR 5
-#define LIBAVFILTER_VERSION_MINOR 7
-#define LIBAVFILTER_VERSION_MICRO 101
-
-#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
- LIBAVFILTER_VERSION_MINOR, \
- LIBAVFILTER_VERSION_MICRO)
-#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \
- LIBAVFILTER_VERSION_MINOR, \
- LIBAVFILTER_VERSION_MICRO)
-#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
-
-#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION)
-
-/**
- * FF_API_* defines may be placed below to indicate public API that will be
- * dropped at a future version bump. The defines themselves are not part of
- * the public API and may change, break or disappear at any time.
- */
-
-#ifndef FF_API_AVFILTERPAD_PUBLIC
-#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_FOO_COUNT
-#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_AVFILTERBUFFER
-#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_OLD_FILTER_OPTS
-#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_AVFILTER_OPEN
-#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_AVFILTER_INIT_FILTER
-#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_OLD_FILTER_REGISTER
-#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
-#ifndef FF_API_OLD_GRAPH_PARSE
-#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 5)
-#endif
-#ifndef FF_API_NOCONST_GET_NAME
-#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 6)
-#endif
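[Editor's note: application code typically consumes these macros at compile time. A minimal, purely illustrative sketch follows; the version threshold chosen here is arbitrary.]

#include "libavfilter/version.h"
#include "libavutil/version.h"

/* AV_VERSION_INT packs major/minor/micro the same way
 * LIBAVFILTER_VERSION_INT does, so the two can be compared directly. */
#if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(5, 7, 100)
#define HAVE_NEW_LAVFI_BEHAVIOUR 1
#else
#define HAVE_NEW_LAVFI_BEHAVIOUR 0
#endif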
-
-#endif /* AVFILTER_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libpostproc/postprocess.h b/Externals/ffmpeg/dev/include/libpostproc/postprocess.h
deleted file mode 100644
index e00ed968d7..0000000000
--- a/Externals/ffmpeg/dev/include/libpostproc/postprocess.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef POSTPROC_POSTPROCESS_H
-#define POSTPROC_POSTPROCESS_H
-
-/**
- * @file
- * @ingroup lpp
- * external API header
- */
-
-/**
- * @defgroup lpp Libpostproc
- * @{
- */
-
-#include "libpostproc/version.h"
-
-/**
- * Return the LIBPOSTPROC_VERSION_INT constant.
- */
-unsigned postproc_version(void);
-
-/**
- * Return the libpostproc build-time configuration.
- */
-const char *postproc_configuration(void);
-
-/**
- * Return the libpostproc license.
- */
-const char *postproc_license(void);
-
-#define PP_QUALITY_MAX 6
-
-#define QP_STORE_T int8_t
-
-#include <inttypes.h>
 <inttypes.h>
-
-typedef void pp_context;
-typedef void pp_mode;
-
-#if LIBPOSTPROC_VERSION_INT < (52<<16)
-typedef pp_context pp_context_t;
-typedef pp_mode pp_mode_t;
-extern const char *const pp_help; ///< a simple help text
-#else
-extern const char pp_help[]; ///< a simple help text
-#endif
-
-void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
- uint8_t * dst[3], const int dstStride[3],
- int horizontalSize, int verticalSize,
- const QP_STORE_T *QP_store, int QP_stride,
- pp_mode *mode, pp_context *ppContext, int pict_type);
-
-
-/**
- * Return a pp_mode or NULL if an error occurred.
- *
- * @param name the string after "-pp" on the command line
- * @param quality a number from 0 to PP_QUALITY_MAX
- */
-pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
-void pp_free_mode(pp_mode *mode);
-
-pp_context *pp_get_context(int width, int height, int flags);
-void pp_free_context(pp_context *ppContext);
-
-#define PP_CPU_CAPS_MMX 0x80000000
-#define PP_CPU_CAPS_MMX2 0x20000000
-#define PP_CPU_CAPS_3DNOW 0x40000000
-#define PP_CPU_CAPS_ALTIVEC 0x10000000
-#define PP_CPU_CAPS_AUTO 0x00080000
-
-#define PP_FORMAT 0x00000008
-#define PP_FORMAT_420 (0x00000011|PP_FORMAT)
-#define PP_FORMAT_422 (0x00000001|PP_FORMAT)
-#define PP_FORMAT_411 (0x00000002|PP_FORMAT)
-#define PP_FORMAT_444 (0x00000000|PP_FORMAT)
-#define PP_FORMAT_440 (0x00000010|PP_FORMAT)
-
-#define PP_PICT_TYPE_QP2 0x00000010 ///< MPEG2 style QScale
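[Editor's note: putting the declarations above together, a minimal postprocessing pass over one decoded YUV 4:2:0 frame could look like the sketch below. This is an illustration, not part of the deleted header; the filter string, buffers and QP table are assumed to come from the caller.]

#include <inttypes.h>
#include "libpostproc/postprocess.h"

static int deblock_frame(const uint8_t *src[3], const int src_stride[3],
                         uint8_t *dst[3], const int dst_stride[3],
                         int width, int height,
                         const QP_STORE_T *qp, int qp_stride)
{
    /* PP_FORMAT_420 selects 4:2:0 chroma subsampling; PP_CPU_CAPS_AUTO lets
     * libpostproc pick the SIMD implementation at runtime. */
    pp_context *ctx = pp_get_context(width, height,
                                     PP_FORMAT_420 | PP_CPU_CAPS_AUTO);
    pp_mode *mode;

    if (!ctx)
        return -1;

    /* Horizontal/vertical deblocking and deringing with automatic quality. */
    mode = pp_get_mode_by_name_and_quality("hb:a,vb:a,dr:a", PP_QUALITY_MAX);
    if (!mode) {
        pp_free_context(ctx);
        return -1;
    }

    pp_postprocess(src, src_stride, dst, dst_stride, width, height,
                   qp, qp_stride, mode, ctx, 0 /* pict_type */);

    pp_free_mode(mode);
    pp_free_context(ctx);
    return 0;
}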
-
-/**
- * @}
- */
-
-#endif /* POSTPROC_POSTPROCESS_H */
diff --git a/Externals/ffmpeg/dev/include/libpostproc/version.h b/Externals/ffmpeg/dev/include/libpostproc/version.h
deleted file mode 100644
index 59c24660f8..0000000000
--- a/Externals/ffmpeg/dev/include/libpostproc/version.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Version macros.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef POSTPROC_POSTPROCESS_VERSION_H
-#define POSTPROC_POSTPROCESS_VERSION_H
-
-/**
- * @file
- * Libpostproc version macros
- */
-
-#include "libavutil/avutil.h"
-
-#define LIBPOSTPROC_VERSION_MAJOR 53
-#define LIBPOSTPROC_VERSION_MINOR 3
-#define LIBPOSTPROC_VERSION_MICRO 100
-
-#define LIBPOSTPROC_VERSION_INT AV_VERSION_INT(LIBPOSTPROC_VERSION_MAJOR, \
- LIBPOSTPROC_VERSION_MINOR, \
- LIBPOSTPROC_VERSION_MICRO)
-#define LIBPOSTPROC_VERSION AV_VERSION(LIBPOSTPROC_VERSION_MAJOR, \
- LIBPOSTPROC_VERSION_MINOR, \
- LIBPOSTPROC_VERSION_MICRO)
-#define LIBPOSTPROC_BUILD LIBPOSTPROC_VERSION_INT
-
-#define LIBPOSTPROC_IDENT "postproc" AV_STRINGIFY(LIBPOSTPROC_VERSION)
-
-#endif /* POSTPROC_POSTPROCESS_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libswresample/swresample.h b/Externals/ffmpeg/dev/include/libswresample/swresample.h
deleted file mode 100644
index 37656a667d..0000000000
--- a/Externals/ffmpeg/dev/include/libswresample/swresample.h
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
- * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at)
- *
- * This file is part of libswresample
- *
- * libswresample is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * libswresample is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with libswresample; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef SWRESAMPLE_SWRESAMPLE_H
-#define SWRESAMPLE_SWRESAMPLE_H
-
-/**
- * @file
- * @ingroup lswr
- * libswresample public header
- */
-
-/**
- * @defgroup lswr Libswresample
- * @{
- *
- * Libswresample (lswr) is a library that handles audio resampling, sample
- * format conversion and mixing.
- *
- * Interaction with lswr is done through SwrContext, which is
- * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters
- * must be set with the @ref avoptions API.
- *
- * The first thing you will need to do in order to use lswr is to allocate
- * SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you
- * are using the former, you must set options through the @ref avoptions API.
- * The latter function provides the same feature, but it allows you to set some
- * common options in the same statement.
- *
- * For example, the following code will set up conversion from planar float sample
- * format to interleaved signed 16-bit integer, downsampling from 48kHz to
- * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
- * matrix). This is using the swr_alloc() function.
- * @code
- * SwrContext *swr = swr_alloc();
- * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
- * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
- * av_opt_set_int(swr, "in_sample_rate", 48000, 0);
- * av_opt_set_int(swr, "out_sample_rate", 44100, 0);
- * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
- * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- * @endcode
- *
- * The same job can be done using swr_alloc_set_opts() as well:
- * @code
- * SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context
- * AV_CH_LAYOUT_STEREO, // out_ch_layout
- * AV_SAMPLE_FMT_S16, // out_sample_fmt
- * 44100, // out_sample_rate
- * AV_CH_LAYOUT_5POINT1, // in_ch_layout
- * AV_SAMPLE_FMT_FLTP, // in_sample_fmt
- * 48000, // in_sample_rate
- * 0, // log_offset
- * NULL); // log_ctx
- * @endcode
- *
- * Once all values have been set, it must be initialized with swr_init(). If
- * you need to change the conversion parameters, you can change the parameters
- * using @ref AVOptions, as described above in the first example; or by using
- * swr_alloc_set_opts(), but with the first argument the allocated context.
- * You must then call swr_init() again.
- *
- * The conversion itself is done by repeatedly calling swr_convert().
- * Note that the samples may get buffered in swr if you provide insufficient
- * output space or if sample rate conversion is done, which requires "future"
- * samples. Samples that do not require future input can be retrieved at any
- * time by using swr_convert() (in_count can be set to 0).
- * At the end of conversion the resampling buffer can be flushed by calling
- * swr_convert() with NULL in and 0 in_count.
- *
- * The samples used in the conversion process can be managed with the libavutil
- * @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc()
- * function used in the following example.
- *
- * The delay between input and output can at any time be found by using
- * swr_get_delay().
- *
- * The following code demonstrates the conversion loop assuming the parameters
- * from above and caller-defined functions get_input() and handle_output():
- * @code
- * uint8_t **input;
- * int in_samples;
- *
- * while (get_input(&input, &in_samples)) {
- * uint8_t *output;
- * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +
- * in_samples, 44100, 48000, AV_ROUND_UP);
- * av_samples_alloc(&output, NULL, 2, out_samples,
- * AV_SAMPLE_FMT_S16, 0);
- * out_samples = swr_convert(swr, &output, out_samples,
- * input, in_samples);
- * handle_output(output, out_samples);
- * av_freep(&output);
- * }
- * @endcode
- *
- * When the conversion is finished, the conversion
- * context and everything associated with it must be freed with swr_free().
- * A swr_close() function is also available, but it exists mainly for
- * compatibility with libavresample, and is not required to be called.
- *
- * There will be no memory leak if the data is not completely flushed before
- * swr_free().
- */
-
-#include <stdint.h>
-#include "libavutil/frame.h"
-#include "libavutil/samplefmt.h"
-
-#include "libswresample/version.h"
-
-#if LIBSWRESAMPLE_VERSION_MAJOR < 1
-#define SWR_CH_MAX 32 ///< Maximum number of channels
-#endif
-
-/**
- * @name Option constants
- * These constants are used for the @ref avoptions interface for lswr.
- * @{
- *
- */
-
-#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate
-//TODO use int resample ?
-//long term TODO can we enable this dynamically?
-
-/** Dithering algorithms */
-enum SwrDitherType {
- SWR_DITHER_NONE = 0,
- SWR_DITHER_RECTANGULAR,
- SWR_DITHER_TRIANGULAR,
- SWR_DITHER_TRIANGULAR_HIGHPASS,
-
- SWR_DITHER_NS = 64, ///< not part of API/ABI
- SWR_DITHER_NS_LIPSHITZ,
- SWR_DITHER_NS_F_WEIGHTED,
- SWR_DITHER_NS_MODIFIED_E_WEIGHTED,
- SWR_DITHER_NS_IMPROVED_E_WEIGHTED,
- SWR_DITHER_NS_SHIBATA,
- SWR_DITHER_NS_LOW_SHIBATA,
- SWR_DITHER_NS_HIGH_SHIBATA,
- SWR_DITHER_NB, ///< not part of API/ABI
-};
-
-/** Resampling Engines */
-enum SwrEngine {
- SWR_ENGINE_SWR, /**< SW Resampler */
- SWR_ENGINE_SOXR, /**< SoX Resampler */
- SWR_ENGINE_NB, ///< not part of API/ABI
-};
-
-/** Resampling Filter Types */
-enum SwrFilterType {
- SWR_FILTER_TYPE_CUBIC, /**< Cubic */
- SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */
- SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */
-};
-
-/**
- * @}
- */
-
-/**
- * The libswresample context. Unlike libavcodec and libavformat, this structure
- * is opaque. This means that if you would like to set options, you must use
- * the @ref avoptions API and cannot directly set values to members of the
- * structure.
- */
-typedef struct SwrContext SwrContext;
-
-/**
- * Get the AVClass for SwrContext. It can be used in combination with
- * AV_OPT_SEARCH_FAKE_OBJ for examining options.
- *
- * @see av_opt_find().
- * @return the AVClass of SwrContext
- */
-const AVClass *swr_get_class(void);
-
-/**
- * @name SwrContext constructor functions
- * @{
- */
-
-/**
- * Allocate SwrContext.
- *
- * If you use this function you will need to set the parameters (manually or
- * with swr_alloc_set_opts()) before calling swr_init().
- *
- * @see swr_alloc_set_opts(), swr_init(), swr_free()
- * @return NULL on error, allocated context otherwise
- */
-struct SwrContext *swr_alloc(void);
-
-/**
- * Initialize context after user parameters have been set.
- * @note The context must be configured using the AVOption API.
- *
- * @see av_opt_set_int()
- * @see av_opt_set_dict()
- *
- * @param[in,out] s Swr context to initialize
- * @return AVERROR error code in case of failure.
- */
-int swr_init(struct SwrContext *s);
-
-/**
- * Check whether an swr context has been initialized or not.
- *
- * @param[in] s Swr context to check
- * @see swr_init()
- * @return positive if it has been initialized, 0 if not initialized
- */
-int swr_is_initialized(struct SwrContext *s);
-
-/**
- * Allocate SwrContext if needed and set/reset common parameters.
- *
- * This function does not require s to be allocated with swr_alloc(). On the
- * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters
- * on the allocated context.
- *
- * @param s existing Swr context if available, or NULL if not
- * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*)
- * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*).
- * @param out_sample_rate output sample rate (frequency in Hz)
- * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*)
- * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*).
- * @param in_sample_rate input sample rate (frequency in Hz)
- * @param log_offset logging level offset
- * @param log_ctx parent logging context, can be NULL
- *
- * @see swr_init(), swr_free()
- * @return NULL on error, allocated context otherwise
- */
-struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
- int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
- int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
- int log_offset, void *log_ctx);
-
-/**
- * @}
- *
- * @name SwrContext destructor functions
- * @{
- */
-
-/**
- * Free the given SwrContext and set the pointer to NULL.
- *
- * @param[in] s a pointer to a pointer to Swr context
- */
-void swr_free(struct SwrContext **s);
-
-/**
- * Closes the context so that swr_is_initialized() returns 0.
- *
- * The context can be brought back to life by running swr_init(),
- * swr_init() can also be used without swr_close().
- * This function is mainly provided to simplify the use case
- * where one tries to support libavresample and libswresample.
- *
- * @param[in,out] s Swr context to be closed
- */
-void swr_close(struct SwrContext *s);
-
-/**
- * @}
- *
- * @name Core conversion functions
- * @{
- */
-
-/** Convert audio.
- *
- * in and in_count can be set to 0 to flush the last few samples out at the
- * end.
- *
- * If more input is provided than output space then the input will be buffered.
- * You can avoid this buffering by providing more output space than input.
- * Conversion will run directly without copying whenever possible.
- *
- * @param s allocated Swr context, with parameters set
- * @param out output buffers, only the first one need be set in case of packed audio
- * @param out_count amount of space available for output in samples per channel
- * @param in input buffers, only the first one need to be set in case of packed audio
- * @param in_count number of input samples available in one channel
- *
- * @return number of samples output per channel, negative value on error
- */
-int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
- const uint8_t **in , int in_count);
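[Editor's note: as a complement to the conversion loop shown in the introduction of this header, the sketch below drains the samples still buffered once the input is exhausted. It is illustrative only and assumes the stereo/S16/44100 output configuration and the caller-defined handle_output() from that introduction.]

#include "libavutil/mem.h"
#include "libavutil/samplefmt.h"
#include "libswresample/swresample.h"

/* Drain whatever is still buffered in the resampler at end of stream. */
static void drain_resampler(struct SwrContext *swr,
                            void (*handle_output)(uint8_t *, int))
{
    uint8_t *output = NULL;
    int out_samples;

    if (av_samples_alloc(&output, NULL, 2, 4096, AV_SAMPLE_FMT_S16, 0) < 0)
        return;
    /* A NULL input with in_count 0 flushes the buffered "future" samples. */
    while ((out_samples = swr_convert(swr, &output, 4096, NULL, 0)) > 0)
        handle_output(output, out_samples);
    av_freep(&output);
}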
-
-/**
- * Convert the next timestamp from input to output;
- * timestamps are in 1/(in_sample_rate * out_sample_rate) units.
- *
- * @note There are 2 slightly differently behaving modes.
- * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
- * in this case timestamps will be passed through with delays compensated
- * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)
- * in this case the output timestamps will match output sample numbers.
- * See ffmpeg-resampler(1) for the two modes of compensation.
- *
- * @param[in] s initialized Swr context
- * @param[in] pts timestamp for the next input sample, INT64_MIN if unknown
- * @see swr_set_compensation(), swr_drop_output() and swr_inject_silence(),
- * which are functions used internally for timestamp compensation.
- * @return the output timestamp for the next output sample
- */
-int64_t swr_next_pts(struct SwrContext *s, int64_t pts);
-
-/**
- * @}
- *
- * @name Low-level option setting functions
- * These functions provide a means to set low-level options that is not possible
- * with the AVOption API.
- * @{
- */
-
-/**
- * Activate resampling compensation ("soft" compensation). This function is
- * internally called when needed in swr_next_pts().
- *
- * @param[in,out] s allocated Swr context. If it is not initialized,
- * or SWR_FLAG_RESAMPLE is not set, swr_init() is
- * called with the flag set.
- * @param[in] sample_delta delta in PTS per sample
- * @param[in] compensation_distance number of samples to compensate for
- * @return >= 0 on success, AVERROR error codes if:
- * @li @c s is NULL,
- * @li @c compensation_distance is less than 0,
- * @li @c compensation_distance is 0 but sample_delta is not,
- * @li compensation unsupported by resampler, or
- * @li swr_init() fails when called.
- */
-int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);
-
-/**
- * Set a customized input channel mapping.
- *
- * @param[in,out] s allocated Swr context, not yet initialized
- * @param[in] channel_map customized input channel mapping (array of channel
- * indexes, -1 for a muted channel)
- * @return >= 0 on success, or AVERROR error code in case of failure.
- */
-int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
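[Editor's note: a short illustrative sketch of the call above, under the assumption that the context is allocated and configured for a stereo input but not yet initialized.]

#include "libswresample/swresample.h"

/* Swap left and right on a stereo input; a -1 entry in the map would mute
 * that channel instead. Must be set before swr_init(). */
static int setup_swapped_stereo(struct SwrContext *swr)
{
    static const int channel_map[2] = { 1, 0 };
    int ret = swr_set_channel_mapping(swr, channel_map);
    return ret < 0 ? ret : swr_init(swr);
}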
-
-/**
- * Set a customized remix matrix.
- *
- * @param s allocated Swr context, not yet initialized
- * @param matrix remix coefficients; matrix[i + stride * o] is
- * the weight of input channel i in output channel o
- * @param stride offset between lines of the matrix
- * @return >= 0 on success, or AVERROR error code in case of failure.
- */
-int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);
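[Editor's note: to make the matrix[i + stride * o] layout concrete, here is an illustrative sketch of a custom stereo-to-mono downmix; the context is assumed to be configured for stereo in, mono out, and not yet initialized.]

#include "libswresample/swresample.h"

/* One output channel (o = 0), two input channels (i = 0, 1), stride 2:
 * matrix[0] is the weight of the left input, matrix[1] of the right. */
static int setup_mono_downmix(struct SwrContext *swr)
{
    static const double matrix[2] = { 0.5, 0.5 };
    int ret = swr_set_matrix(swr, matrix, 2);
    return ret < 0 ? ret : swr_init(swr);
}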
-
-/**
- * @}
- *
- * @name Sample handling functions
- * @{
- */
-
-/**
- * Drops the specified number of output samples.
- *
- * This function, along with swr_inject_silence(), is called by swr_next_pts()
- * if needed for "hard" compensation.
- *
- * @param s allocated Swr context
- * @param count number of samples to be dropped
- *
- * @return >= 0 on success, or a negative AVERROR code on failure
- */
-int swr_drop_output(struct SwrContext *s, int count);
-
-/**
- * Injects the specified number of silence samples.
- *
- * This function, along with swr_drop_output(), is called by swr_next_pts()
- * if needed for "hard" compensation.
- *
- * @param s allocated Swr context
- * @param count number of silence samples to be injected
- *
- * @return >= 0 on success, or a negative AVERROR code on failure
- */
-int swr_inject_silence(struct SwrContext *s, int count);
-
-/**
- * Gets the delay the next input sample will experience relative to the next output sample.
- *
- * Swresample can buffer data if more input has been provided than available
- * output space; in addition, converting between sample rates introduces a delay.
- * This function returns the sum of all such delays.
- * The exact delay is not necessarily an integer value in either input or
- * output sample rate. Especially when downsampling by a large value, the
- * output sample rate may be a poor choice to represent the delay, similarly
- * for upsampling and the input sample rate.
- *
- * @param s swr context
- * @param base timebase in which the returned delay will be:
- * @li if it's set to 1 the returned delay is in seconds
- * @li if it's set to 1000 the returned delay is in milliseconds
- * @li if it's set to the input sample rate then the returned
- * delay is in input samples
- * @li if it's set to the output sample rate then the returned
- * delay is in output samples
- * @li if it's the least common multiple of in_sample_rate and
- * out_sample_rate then an exact rounding-free delay will be
- * returned
- * @returns the delay in 1 / @c base units.
- */
-int64_t swr_get_delay(struct SwrContext *s, int64_t base);
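[Editor's note: a brief illustrative sketch of the base argument, continuing the 48 kHz to 44.1 kHz example from the introduction of this header; swr and in_samples are assumed to come from the caller.]

#include "libavutil/mathematics.h"
#include "libswresample/swresample.h"

/* Upper bound on the output of the next swr_convert() call.
 * swr_get_delay(swr, 1000) would give the same latency in milliseconds. */
static int max_output_samples(struct SwrContext *swr, int in_samples)
{
    int64_t delay = swr_get_delay(swr, 48000);   /* delay in input samples */
    return (int)av_rescale_rnd(delay + in_samples, 44100, 48000,
                               AV_ROUND_UP);
}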
-
-/**
- * @}
- *
- * @name Configuration accessors
- * @{
- */
-
-/**
- * Return the @ref LIBSWRESAMPLE_VERSION_INT constant.
- *
- * This is useful to check if the build-time libswresample has the same version
- * as the run-time one.
- *
- * @returns the unsigned int-typed version
- */
-unsigned swresample_version(void);
-
-/**
- * Return the swr build-time configuration.
- *
- * @returns the build-time @c ./configure flags
- */
-const char *swresample_configuration(void);
-
-/**
- * Return the swr license.
- *
- * @returns the license of libswresample, determined at build-time
- */
-const char *swresample_license(void);
-
-/**
- * @}
- *
- * @name AVFrame based API
- * @{
- */
-
-/**
- * Convert the samples in the input AVFrame and write them to the output AVFrame.
- *
- * Input and output AVFrames must have channel_layout, sample_rate and format set.
- *
- * If the output AVFrame does not have its data pointers allocated, the
- * nb_samples field will be set and av_frame_get_buffer() will be called
- * to allocate the frame.
- *
- * The output AVFrame can be NULL or have fewer allocated samples than required.
- * In this case, any remaining samples not written to the output will be added
- * to an internal FIFO buffer, to be returned at the next call to this function
- * or to swr_convert().
- *
- * If converting sample rate, there may be data remaining in the internal
- * resampling delay buffer. swr_get_delay() tells the number of
- * remaining samples. To get this data as output, call this function or
- * swr_convert() with NULL input.
- *
- * If the SwrContext configuration does not match the output and
- * input AVFrame settings, the conversion does not take place and,
- * depending on which AVFrame is not matching, AVERROR_OUTPUT_CHANGED,
- * AVERROR_INPUT_CHANGED or their bitwise OR is returned.
- *
- * @see swr_delay()
- * @see swr_convert()
- * @see swr_get_delay()
- *
- * @param swr audio resample context
- * @param output output AVFrame
- * @param input input AVFrame
- * @return 0 on success, AVERROR on failure or nonmatching
- * configuration.
- */
-int swr_convert_frame(SwrContext *swr,
- AVFrame *output, const AVFrame *input);
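[Editor's note: a minimal AVFrame-based wrapper might look like the sketch below, under the assumption that the caller has set channel_layout, sample_rate and format on both frames; it is not part of the deleted header.]

#include "libavutil/frame.h"
#include "libswresample/swresample.h"

static int convert_one_frame(SwrContext *swr, AVFrame *out, const AVFrame *in)
{
    int ret = swr_convert_frame(swr, out, in);
    if (ret < 0) {
        /* Input or output parameters may have changed
         * (AVERROR_INPUT_CHANGED / AVERROR_OUTPUT_CHANGED):
         * reconfigure the context from the frames and retry once. */
        if (swr_config_frame(swr, out, in) < 0)
            return ret;
        ret = swr_convert_frame(swr, out, in);
    }
    return ret;
}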
-
-/**
- * Configure or reconfigure the SwrContext using the information
- * provided by the AVFrames.
- *
- * The original resampling context is reset even on failure.
- * The function calls swr_close() internally if the context is open.
- *
- * @see swr_close();
- *
- * @param swr audio resample context
- * @param output output AVFrame
- * @param input input AVFrame
- * @return 0 on success, AVERROR on failure.
- */
-int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in);
-
-/**
- * @}
- * @}
- */
-
-#endif /* SWRESAMPLE_SWRESAMPLE_H */
diff --git a/Externals/ffmpeg/dev/include/libswresample/version.h b/Externals/ffmpeg/dev/include/libswresample/version.h
deleted file mode 100644
index 61c76fa2f4..0000000000
--- a/Externals/ffmpeg/dev/include/libswresample/version.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Version macros.
- *
- * This file is part of libswresample
- *
- * libswresample is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * libswresample is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with libswresample; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef SWR_VERSION_H
-#define SWR_VERSION_H
-
-/**
- * @file
- * Libswresample version macros
- */
-
-#include "libavutil/avutil.h"
-
-#define LIBSWRESAMPLE_VERSION_MAJOR 1
-#define LIBSWRESAMPLE_VERSION_MINOR 1
-#define LIBSWRESAMPLE_VERSION_MICRO 100
-
-#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \
- LIBSWRESAMPLE_VERSION_MINOR, \
- LIBSWRESAMPLE_VERSION_MICRO)
-#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \
- LIBSWRESAMPLE_VERSION_MINOR, \
- LIBSWRESAMPLE_VERSION_MICRO)
-#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT
-
-#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION)
-
-#endif /* SWR_VERSION_H */
diff --git a/Externals/ffmpeg/dev/lib/avcodec-56.def b/Externals/ffmpeg/dev/lib/avcodec-56.def
deleted file mode 100644
index 962bd0f5e0..0000000000
--- a/Externals/ffmpeg/dev/lib/avcodec-56.def
+++ /dev/null
@@ -1,219 +0,0 @@
-EXPORTS
- audio_resample
- audio_resample_close
- av_audio_convert
- av_audio_convert_alloc
- av_audio_convert_free
- av_audio_resample_init
- av_bitstream_filter_close
- av_bitstream_filter_filter
- av_bitstream_filter_init
- av_bitstream_filter_next
- av_codec_ffversion DATA
- av_codec_get_chroma_intra_matrix
- av_codec_get_codec_descriptor
- av_codec_get_lowres
- av_codec_get_max_lowres
- av_codec_get_pkt_timebase
- av_codec_get_seek_preroll
- av_codec_is_decoder
- av_codec_is_encoder
- av_codec_next
- av_codec_set_chroma_intra_matrix
- av_codec_set_codec_descriptor
- av_codec_set_lowres
- av_codec_set_pkt_timebase
- av_codec_set_seek_preroll
- av_copy_packet
- av_copy_packet_side_data
- av_dct_calc
- av_dct_end
- av_dct_init
- av_destruct_packet
- av_dup_packet
- av_dv_codec_profile
- av_dv_codec_profile2
- av_dv_frame_profile
- av_fast_padded_malloc
- av_fast_padded_mallocz
- av_fft_calc
- av_fft_end
- av_fft_init
- av_fft_permute
- av_free_packet
- av_get_audio_frame_duration
- av_get_bits_per_sample
- av_get_codec_tag_string
- av_get_exact_bits_per_sample
- av_get_pcm_codec
- av_get_profile_name
- av_grow_packet
- av_hwaccel_next
- av_imdct_calc
- av_imdct_half
- av_init_packet
- av_lockmgr_register
- av_log_ask_for_sample
- av_log_missing_feature
- av_mdct_calc
- av_mdct_end
- av_mdct_init
- av_new_packet
- av_packet_copy_props
- av_packet_free_side_data
- av_packet_from_data
- av_packet_get_side_data
- av_packet_merge_side_data
- av_packet_move_ref
- av_packet_new_side_data
- av_packet_pack_dictionary
- av_packet_ref
- av_packet_rescale_ts
- av_packet_shrink_side_data
- av_packet_split_side_data
- av_packet_unpack_dictionary
- av_packet_unref
- av_parser_change
- av_parser_close
- av_parser_init
- av_parser_next
- av_parser_parse2
- av_picture_copy
- av_picture_crop
- av_picture_pad
- av_rdft_calc
- av_rdft_end
- av_rdft_init
- av_register_bitstream_filter
- av_register_codec_parser
- av_register_hwaccel
- av_resample
- av_resample_close
- av_resample_compensate
- av_resample_init
- av_shrink_packet
- av_vorbis_parse_frame
- av_vorbis_parse_frame_flags
- av_vorbis_parse_free
- av_vorbis_parse_init
- av_vorbis_parse_reset
- av_xiphlacing
- available_bits
- avcodec_align_dimensions
- avcodec_align_dimensions2
- avcodec_alloc_context3
- avcodec_alloc_frame
- avcodec_chroma_pos_to_enum
- avcodec_close
- avcodec_configuration
- avcodec_copy_context
- avcodec_dct_alloc
- avcodec_dct_get_class
- avcodec_dct_init
- avcodec_decode_audio3
- avcodec_decode_audio4
- avcodec_decode_subtitle2
- avcodec_decode_video2
- avcodec_default_execute
- avcodec_default_execute2
- avcodec_default_get_buffer
- avcodec_default_get_buffer2
- avcodec_default_get_format
- avcodec_default_reget_buffer
- avcodec_default_release_buffer
- avcodec_descriptor_get
- avcodec_descriptor_get_by_name
- avcodec_descriptor_next
- avcodec_encode_audio
- avcodec_encode_audio2
- avcodec_encode_subtitle
- avcodec_encode_video
- avcodec_encode_video2
- avcodec_enum_to_chroma_pos
- avcodec_fill_audio_frame
- avcodec_find_best_pix_fmt2
- avcodec_find_best_pix_fmt_of_2
- avcodec_find_best_pix_fmt_of_list
- avcodec_find_decoder
- avcodec_find_decoder_by_name
- avcodec_find_encoder
- avcodec_find_encoder_by_name
- avcodec_flush_buffers
- avcodec_free_context
- avcodec_free_frame
- avcodec_get_chroma_sub_sample
- avcodec_get_class
- avcodec_get_context_defaults3
- avcodec_get_edge_width
- avcodec_get_frame_class
- avcodec_get_frame_defaults
- avcodec_get_name
- avcodec_get_pix_fmt_loss
- avcodec_get_subtitle_rect_class
- avcodec_get_type
- avcodec_is_open
- avcodec_license
- avcodec_open2
- avcodec_pix_fmt_to_codec_tag
- avcodec_register
- avcodec_register_all
- avcodec_set_dimensions
- avcodec_string
- avcodec_version
- aver_isf_history
- avpicture_alloc
- avpicture_deinterlace
- avpicture_fill
- avpicture_free
- avpicture_get_size
- avpicture_layout
- avpriv_aac_parse_header
- avpriv_ac3_channel_layout_tab DATA
- avpriv_ac3_parse_header
- avpriv_ac3_parse_header2
- avpriv_align_put_bits
- avpriv_bprint_to_extradata
- avpriv_color_frame
- avpriv_copy_bits
- avpriv_copy_pce_data
- avpriv_dca_convert_bitstream
- avpriv_dca_sample_rates DATA
- avpriv_dirac_parse_sequence_header
- avpriv_dnxhd_get_frame_size
- avpriv_do_elbg
- avpriv_dv_frame_profile2
- avpriv_exif_decode_ifd
- avpriv_find_pix_fmt
- avpriv_find_start_code
- avpriv_flac_is_extradata_valid
- avpriv_flac_parse_streaminfo
- avpriv_get_raw_pix_fmt_tags
- avpriv_h264_has_num_reorder_frames
- avpriv_init_elbg
- avpriv_lock_avformat
- avpriv_mjpeg_bits_ac_chrominance DATA
- avpriv_mjpeg_bits_ac_luminance DATA
- avpriv_mjpeg_bits_dc_chrominance DATA
- avpriv_mjpeg_bits_dc_luminance DATA
- avpriv_mjpeg_val_ac_chrominance DATA
- avpriv_mjpeg_val_ac_luminance DATA
- avpriv_mjpeg_val_dc DATA
- avpriv_mpa_bitrate_tab DATA
- avpriv_mpa_decode_header
- avpriv_mpa_decode_header2
- avpriv_mpa_freq_tab DATA
- avpriv_mpeg4audio_get_config
- avpriv_mpeg4audio_sample_rates DATA
- avpriv_mpegaudio_decode_header
- avpriv_pix_fmt_bps_avi DATA
- avpriv_pix_fmt_bps_mov DATA
- avpriv_put_string
- avpriv_split_xiph_headers
- avpriv_tak_parse_streaminfo
- avpriv_toupper4
- avpriv_unlock_avformat
- avpriv_vorbis_parse_extradata
- avpriv_vorbis_parse_frame
- avpriv_vorbis_parse_frame_flags
- avpriv_vorbis_parse_reset
- avsubtitle_free
diff --git a/Externals/ffmpeg/dev/lib/avdevice-56.def b/Externals/ffmpeg/dev/lib/avdevice-56.def
deleted file mode 100644
index f453187e60..0000000000
--- a/Externals/ffmpeg/dev/lib/avdevice-56.def
+++ /dev/null
@@ -1,19 +0,0 @@
-EXPORTS
- av_device_capabilities DATA
- av_device_ffversion DATA
- av_input_audio_device_next
- av_input_video_device_next
- av_output_audio_device_next
- av_output_video_device_next
- avdevice_app_to_dev_control_message
- avdevice_capabilities_create
- avdevice_capabilities_free
- avdevice_configuration
- avdevice_dev_to_app_control_message
- avdevice_free_list_devices
- avdevice_license
- avdevice_list_devices
- avdevice_list_input_sources
- avdevice_list_output_sinks
- avdevice_register_all
- avdevice_version
diff --git a/Externals/ffmpeg/dev/lib/avdevice.lib b/Externals/ffmpeg/dev/lib/avdevice.lib
deleted file mode 100644
index 600f7b636c..0000000000
Binary files a/Externals/ffmpeg/dev/lib/avdevice.lib and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/avfilter-5.def b/Externals/ffmpeg/dev/lib/avfilter-5.def
deleted file mode 100644
index 7268f51bbb..0000000000
--- a/Externals/ffmpeg/dev/lib/avfilter-5.def
+++ /dev/null
@@ -1,81 +0,0 @@
-EXPORTS
- av_abuffersink_params_alloc
- av_buffersink_get_buffer_ref
- av_buffersink_get_frame
- av_buffersink_get_frame_flags
- av_buffersink_get_frame_rate
- av_buffersink_get_samples
- av_buffersink_params_alloc
- av_buffersink_poll_frame
- av_buffersink_read
- av_buffersink_read_samples
- av_buffersink_set_frame_size
- av_buffersrc_add_frame
- av_buffersrc_add_frame_flags
- av_buffersrc_add_ref
- av_buffersrc_buffer
- av_buffersrc_get_nb_failed_requests
- av_buffersrc_write_frame
- av_filter_ffversion DATA
- av_filter_next
- avfilter_add_matrix
- avfilter_all_channel_layouts DATA
- avfilter_config_links
- avfilter_configuration
- avfilter_copy_buf_props
- avfilter_copy_buffer_ref_props
- avfilter_copy_frame_props
- avfilter_free
- avfilter_get_audio_buffer_ref_from_arrays
- avfilter_get_audio_buffer_ref_from_arrays_channels
- avfilter_get_audio_buffer_ref_from_frame
- avfilter_get_buffer_ref_from_frame
- avfilter_get_by_name
- avfilter_get_class
- avfilter_get_matrix
- avfilter_get_video_buffer_ref_from_arrays
- avfilter_get_video_buffer_ref_from_frame
- avfilter_graph_add_filter
- avfilter_graph_alloc
- avfilter_graph_alloc_filter
- avfilter_graph_config
- avfilter_graph_create_filter
- avfilter_graph_dump
- avfilter_graph_free
- avfilter_graph_get_filter
- avfilter_graph_parse
- avfilter_graph_parse2
- avfilter_graph_parse_ptr
- avfilter_graph_queue_command
- avfilter_graph_request_oldest
- avfilter_graph_send_command
- avfilter_graph_set_auto_convert
- avfilter_init_dict
- avfilter_init_filter
- avfilter_init_str
- avfilter_inout_alloc
- avfilter_inout_free
- avfilter_insert_filter
- avfilter_license
- avfilter_link
- avfilter_link_free
- avfilter_link_get_channels
- avfilter_link_set_closed
- avfilter_make_format64_list
- avfilter_mul_matrix
- avfilter_next
- avfilter_open
- avfilter_pad_count
- avfilter_pad_get_name
- avfilter_pad_get_type
- avfilter_process_command
- avfilter_ref_buffer
- avfilter_ref_get_channels
- avfilter_register
- avfilter_register_all
- avfilter_sub_matrix
- avfilter_transform
- avfilter_uninit
- avfilter_unref_buffer
- avfilter_unref_bufferp
- avfilter_version
diff --git a/Externals/ffmpeg/dev/lib/avfilter.lib b/Externals/ffmpeg/dev/lib/avfilter.lib
deleted file mode 100644
index 46b6cd509d..0000000000
Binary files a/Externals/ffmpeg/dev/lib/avfilter.lib and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/avformat-56.def b/Externals/ffmpeg/dev/lib/avformat-56.def
deleted file mode 100644
index dfce181ede..0000000000
--- a/Externals/ffmpeg/dev/lib/avformat-56.def
+++ /dev/null
@@ -1,161 +0,0 @@
-EXPORTS
- av_add_index_entry
- av_append_packet
- av_codec_get_id
- av_codec_get_tag
- av_codec_get_tag2
- av_convert_lang_to
- av_demuxer_open
- av_dump_format
- av_filename_number_test
- av_find_best_stream
- av_find_default_stream_index
- av_find_input_format
- av_find_program_from_stream
- av_fmt_ctx_get_duration_estimation_method
- av_format_ffversion DATA
- av_format_get_audio_codec
- av_format_get_control_message_cb
- av_format_get_metadata_header_padding
- av_format_get_opaque
- av_format_get_probe_score
- av_format_get_subtitle_codec
- av_format_get_video_codec
- av_format_inject_global_side_data
- av_format_set_audio_codec
- av_format_set_control_message_cb
- av_format_set_metadata_header_padding
- av_format_set_opaque
- av_format_set_subtitle_codec
- av_format_set_video_codec
- av_get_frame_filename
- av_get_output_timestamp
- av_get_packet
- av_guess_codec
- av_guess_format
- av_guess_frame_rate
- av_guess_sample_aspect_ratio
- av_hex_dump
- av_hex_dump_log
- av_iformat_next
- av_index_search_timestamp
- av_interleaved_write_frame
- av_interleaved_write_uncoded_frame
- av_match_ext
- av_new_program
- av_oformat_next
- av_pkt_dump2
- av_pkt_dump_log2
- av_probe_input_buffer
- av_probe_input_buffer2
- av_probe_input_format
- av_probe_input_format2
- av_probe_input_format3
- av_read_frame
- av_read_pause
- av_read_play
- av_register_all
- av_register_input_format
- av_register_output_format
- av_sdp_create
- av_seek_frame
- av_stream_get_end_pts
- av_stream_get_parser
- av_stream_get_r_frame_rate
- av_stream_get_recommended_encoder_configuration
- av_stream_get_side_data
- av_stream_set_r_frame_rate
- av_stream_set_recommended_encoder_configuration
- av_url_split
- av_write_frame
- av_write_trailer
- av_write_uncoded_frame
- av_write_uncoded_frame_query
- avformat_alloc_context
- avformat_alloc_output_context2
- avformat_close_input
- avformat_configuration
- avformat_find_stream_info
- avformat_free_context
- avformat_get_class
- avformat_get_mov_audio_tags
- avformat_get_mov_video_tags
- avformat_get_riff_audio_tags
- avformat_get_riff_video_tags
- avformat_license
- avformat_match_stream_specifier
- avformat_network_deinit
- avformat_network_init
- avformat_new_stream
- avformat_open_input
- avformat_query_codec
- avformat_queue_attached_pictures
- avformat_seek_file
- avformat_version
- avformat_write_header
- avio_alloc_context
- avio_check
- avio_close
- avio_close_dyn_buf
- avio_closep
- avio_enum_protocols
- avio_feof
- avio_find_protocol_name
- avio_flush
- avio_get_str
- avio_get_str16be
- avio_get_str16le
- avio_open
- avio_open2
- avio_open_dyn_buf
- avio_pause
- avio_printf
- avio_put_str
- avio_put_str16le
- avio_r8
- avio_rb16
- avio_rb24
- avio_rb32
- avio_rb64
- avio_read
- avio_read_to_bprint
- avio_rl16
- avio_rl24
- avio_rl32
- avio_rl64
- avio_seek
- avio_seek_time
- avio_size
- avio_skip
- avio_w8
- avio_wb16
- avio_wb24
- avio_wb32
- avio_wb64
- avio_wl16
- avio_wl24
- avio_wl32
- avio_wl64
- avio_write
- avpriv_dv_get_packet
- avpriv_dv_init_demux
- avpriv_dv_produce_packet
- avpriv_mpegts_parse_close
- avpriv_mpegts_parse_open
- avpriv_mpegts_parse_packet
- avpriv_new_chapter
- avpriv_set_pts_info
- ff_inet_aton
- ff_rtp_get_local_rtcp_port
- ff_rtp_get_local_rtp_port
- ff_rtsp_parse_line
- ff_socket_nonblock
- ffio_open_dyn_packet_buf
- ffio_set_buf_size
- ffurl_close
- ffurl_open
- ffurl_read_complete
- ffurl_seek
- ffurl_size
- ffurl_write
- url_feof
diff --git a/Externals/ffmpeg/dev/lib/avutil-54.def b/Externals/ffmpeg/dev/lib/avutil-54.def
deleted file mode 100644
index e02e465fe6..0000000000
--- a/Externals/ffmpeg/dev/lib/avutil-54.def
+++ /dev/null
@@ -1,441 +0,0 @@
-EXPORTS
- av_add_q
- av_add_stable
- av_adler32_update
- av_aes_alloc
- av_aes_crypt
- av_aes_init
- av_aes_size DATA
- av_asprintf
- av_audio_fifo_alloc
- av_audio_fifo_drain
- av_audio_fifo_free
- av_audio_fifo_read
- av_audio_fifo_realloc
- av_audio_fifo_reset
- av_audio_fifo_size
- av_audio_fifo_space
- av_audio_fifo_write
- av_base64_decode
- av_base64_encode
- av_basename
- av_blowfish_crypt
- av_blowfish_crypt_ecb
- av_blowfish_init
- av_bmg_get
- av_bprint_append_data
- av_bprint_channel_layout
- av_bprint_chars
- av_bprint_clear
- av_bprint_escape
- av_bprint_finalize
- av_bprint_get_buffer
- av_bprint_init
- av_bprint_init_for_buffer
- av_bprint_strftime
- av_bprintf
- av_buffer_alloc
- av_buffer_allocz
- av_buffer_create
- av_buffer_default_free
- av_buffer_get_opaque
- av_buffer_get_ref_count
- av_buffer_is_writable
- av_buffer_make_writable
- av_buffer_pool_get
- av_buffer_pool_init
- av_buffer_pool_uninit
- av_buffer_realloc
- av_buffer_ref
- av_buffer_unref
- av_calloc
- av_camellia_alloc
- av_camellia_crypt
- av_camellia_init
- av_camellia_size DATA
- av_cast5_alloc
- av_cast5_crypt
- av_cast5_crypt2
- av_cast5_init
- av_cast5_size DATA
- av_channel_layout_extract_channel
- av_chroma_location_name
- av_color_primaries_name
- av_color_range_name
- av_color_space_name
- av_color_transfer_name
- av_compare_mod
- av_compare_ts
- av_cpu_count
- av_crc
- av_crc_get_table
- av_crc_init
- av_ctz
- av_d2q
- av_d2str
- av_default_get_category
- av_default_item_name
- av_des_crypt
- av_des_init
- av_des_mac
- av_dict_copy
- av_dict_count
- av_dict_free
- av_dict_get
- av_dict_get_string
- av_dict_parse_string
- av_dict_set
- av_dict_set_int
- av_dirname
- av_display_matrix_flip
- av_display_rotation_get
- av_display_rotation_set
- av_div_q
- av_downmix_info_update_side_data
- av_dynarray2_add
- av_dynarray_add
- av_dynarray_add_nofree
- av_escape
- av_expr_eval
- av_expr_free
- av_expr_parse
- av_expr_parse_and_eval
- av_fast_malloc
- av_fast_realloc
- av_fifo_alloc
- av_fifo_alloc_array
- av_fifo_drain
- av_fifo_free
- av_fifo_freep
- av_fifo_generic_read
- av_fifo_generic_write
- av_fifo_grow
- av_fifo_realloc2
- av_fifo_reset
- av_fifo_size
- av_fifo_space
- av_file_map
- av_file_unmap
- av_find_best_pix_fmt_of_2
- av_find_info_tag
- av_find_nearest_q_idx
- av_fopen_utf8
- av_force_cpu_flags
- av_frame_alloc
- av_frame_clone
- av_frame_copy
- av_frame_copy_props
- av_frame_free
- av_frame_get_best_effort_timestamp
- av_frame_get_buffer
- av_frame_get_channel_layout
- av_frame_get_channels
- av_frame_get_color_range
- av_frame_get_colorspace
- av_frame_get_decode_error_flags
- av_frame_get_metadata
- av_frame_get_pkt_duration
- av_frame_get_pkt_pos
- av_frame_get_pkt_size
- av_frame_get_plane_buffer
- av_frame_get_qp_table
- av_frame_get_sample_rate
- av_frame_get_side_data
- av_frame_is_writable
- av_frame_make_writable
- av_frame_move_ref
- av_frame_new_side_data
- av_frame_ref
- av_frame_remove_side_data
- av_frame_set_best_effort_timestamp
- av_frame_set_channel_layout
- av_frame_set_channels
- av_frame_set_color_range
- av_frame_set_colorspace
- av_frame_set_decode_error_flags
- av_frame_set_metadata
- av_frame_set_pkt_duration
- av_frame_set_pkt_pos
- av_frame_set_pkt_size
- av_frame_set_qp_table
- av_frame_set_sample_rate
- av_frame_side_data_name
- av_frame_unref
- av_free
- av_freep
- av_gcd
- av_get_alt_sample_fmt
- av_get_bits_per_pixel
- av_get_bytes_per_sample
- av_get_channel_description
- av_get_channel_layout
- av_get_channel_layout_channel_index
- av_get_channel_layout_nb_channels
- av_get_channel_layout_string
- av_get_channel_name
- av_get_colorspace_name
- av_get_cpu_flags
- av_get_default_channel_layout
- av_get_double
- av_get_int
- av_get_known_color_name
- av_get_media_type_string
- av_get_packed_sample_fmt
- av_get_padded_bits_per_pixel
- av_get_picture_type_char
- av_get_pix_fmt
- av_get_pix_fmt_loss
- av_get_pix_fmt_name
- av_get_pix_fmt_string
- av_get_planar_sample_fmt
- av_get_q
- av_get_random_seed
- av_get_sample_fmt
- av_get_sample_fmt_name
- av_get_sample_fmt_string
- av_get_standard_channel_layout
- av_get_string
- av_get_time_base_q
- av_get_token
- av_gettime
- av_gettime_relative
- av_gettime_relative_is_monotonic
- av_hash_alloc
- av_hash_final
- av_hash_final_b64
- av_hash_final_bin
- av_hash_final_hex
- av_hash_freep
- av_hash_get_name
- av_hash_get_size
- av_hash_init
- av_hash_names
- av_hash_update
- av_hmac_alloc
- av_hmac_calc
- av_hmac_final
- av_hmac_free
- av_hmac_init
- av_hmac_update
- av_image_alloc
- av_image_check_sar
- av_image_check_size
- av_image_copy
- av_image_copy_plane
- av_image_copy_to_buffer
- av_image_fill_arrays
- av_image_fill_linesizes
- av_image_fill_max_pixsteps
- av_image_fill_pointers
- av_image_get_buffer_size
- av_image_get_linesize
- av_int_list_length_for_size
- av_isdigit
- av_isgraph
- av_isspace
- av_isxdigit
- av_lfg_init
- av_log
- av_log2
- av_log2_16bit
- av_log_default_callback
- av_log_format_line
- av_log_get_flags
- av_log_get_level
- av_log_set_callback
- av_log_set_flags
- av_log_set_level
- av_lzo1x_decode
- av_malloc
- av_mallocz
- av_match_list
- av_match_name
- av_max_alloc
- av_md5_alloc
- av_md5_final
- av_md5_init
- av_md5_size DATA
- av_md5_sum
- av_md5_update
- av_memcpy_backptr
- av_memdup
- av_mul_q
- av_murmur3_alloc
- av_murmur3_final
- av_murmur3_init
- av_murmur3_init_seeded
- av_murmur3_update
- av_nearer_q
- av_next_option
- av_opt_child_class_next
- av_opt_child_next
- av_opt_copy
- av_opt_eval_double
- av_opt_eval_flags
- av_opt_eval_float
- av_opt_eval_int
- av_opt_eval_int64
- av_opt_eval_q
- av_opt_find
- av_opt_find2
- av_opt_flag_is_set
- av_opt_free
- av_opt_freep_ranges
- av_opt_get
- av_opt_get_channel_layout
- av_opt_get_dict_val
- av_opt_get_double
- av_opt_get_image_size
- av_opt_get_int
- av_opt_get_key_value
- av_opt_get_pixel_fmt
- av_opt_get_q
- av_opt_get_sample_fmt
- av_opt_get_video_rate
- av_opt_is_set_to_default
- av_opt_is_set_to_default_by_name
- av_opt_next
- av_opt_ptr
- av_opt_query_ranges
- av_opt_query_ranges_default
- av_opt_serialize
- av_opt_set
- av_opt_set_bin
- av_opt_set_channel_layout
- av_opt_set_defaults
- av_opt_set_defaults2
- av_opt_set_dict
- av_opt_set_dict2
- av_opt_set_dict_val
- av_opt_set_double
- av_opt_set_from_string
- av_opt_set_image_size
- av_opt_set_int
- av_opt_set_pixel_fmt
- av_opt_set_q
- av_opt_set_sample_fmt
- av_opt_set_video_rate
- av_opt_show2
- av_parse_color
- av_parse_cpu_caps
- av_parse_cpu_flags
- av_parse_ratio
- av_parse_time
- av_parse_video_rate
- av_parse_video_size
- av_pix_fmt_count_planes
- av_pix_fmt_desc_get
- av_pix_fmt_desc_get_id
- av_pix_fmt_desc_next
- av_pix_fmt_descriptors DATA
- av_pix_fmt_get_chroma_sub_sample
- av_pix_fmt_swap_endianness
- av_pixelutils_get_sad_fn
- av_rc4_crypt
- av_rc4_init
- av_read_image_line
- av_realloc
- av_realloc_array
- av_realloc_f
- av_reallocp
- av_reallocp_array
- av_reduce
- av_rescale
- av_rescale_delta
- av_rescale_q
- av_rescale_q_rnd
- av_rescale_rnd
- av_reverse DATA
- av_ripemd_alloc
- av_ripemd_final
- av_ripemd_init
- av_ripemd_size DATA
- av_ripemd_update
- av_sample_fmt_is_planar
- av_samples_alloc
- av_samples_alloc_array_and_samples
- av_samples_copy
- av_samples_fill_arrays
- av_samples_get_buffer_size
- av_samples_set_silence
- av_set_cpu_flags_mask
- av_set_double
- av_set_int
- av_set_options_string
- av_set_q
- av_set_string3
- av_sha512_alloc
- av_sha512_final
- av_sha512_init
- av_sha512_size DATA
- av_sha512_update
- av_sha_alloc
- av_sha_final
- av_sha_init
- av_sha_size DATA
- av_sha_update
- av_small_strptime
- av_stereo3d_alloc
- av_stereo3d_create_side_data
- av_strcasecmp
- av_strdup
- av_strerror
- av_stristart
- av_stristr
- av_strlcat
- av_strlcatf
- av_strlcpy
- av_strncasecmp
- av_strndup
- av_strnstr
- av_strstart
- av_strtod
- av_strtok
- av_sub_q
- av_tempfile
- av_thread_message_queue_alloc
- av_thread_message_queue_free
- av_thread_message_queue_recv
- av_thread_message_queue_send
- av_thread_message_queue_set_err_recv
- av_thread_message_queue_set_err_send
- av_timecode_adjust_ntsc_framenum2
- av_timecode_check_frame_rate
- av_timecode_get_smpte_from_framenum
- av_timecode_init
- av_timecode_init_from_string
- av_timecode_make_mpeg_tc_string
- av_timecode_make_smpte_tc_string
- av_timecode_make_string
- av_timegm
- av_tree_destroy
- av_tree_enumerate
- av_tree_find
- av_tree_insert
- av_tree_node_alloc
- av_tree_node_size DATA
- av_usleep
- av_utf8_decode
- av_util_ffversion DATA
- av_vbprintf
- av_vlog
- av_write_image_line
- av_xtea_crypt
- av_xtea_init
- avpriv_alloc_fixed_dsp
- avpriv_cga_font DATA
- avpriv_emms_yasm DATA
- avpriv_float_dsp_alloc
- avpriv_float_dsp_init
- avpriv_frame_get_metadatap
- avpriv_init_lls
- avpriv_open
- avpriv_report_missing_feature
- avpriv_request_sample
- avpriv_scalarproduct_float_c
- avpriv_set_systematic_pal2
- avpriv_solve_lls
- avpriv_vga16_font DATA
- avutil_configuration
- avutil_license
- avutil_version
diff --git a/Externals/ffmpeg/dev/lib/libavcodec.dll.a b/Externals/ffmpeg/dev/lib/libavcodec.dll.a
deleted file mode 100644
index 4eaadfc46f..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libavcodec.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libavdevice.dll.a b/Externals/ffmpeg/dev/lib/libavdevice.dll.a
deleted file mode 100644
index 5a8f4bd651..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libavdevice.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libavfilter.dll.a b/Externals/ffmpeg/dev/lib/libavfilter.dll.a
deleted file mode 100644
index cf8577a906..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libavfilter.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libavformat.dll.a b/Externals/ffmpeg/dev/lib/libavformat.dll.a
deleted file mode 100644
index 720c267654..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libavformat.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libavutil.dll.a b/Externals/ffmpeg/dev/lib/libavutil.dll.a
deleted file mode 100644
index 0220047180..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libavutil.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libpostproc.dll.a b/Externals/ffmpeg/dev/lib/libpostproc.dll.a
deleted file mode 100644
index c7b8ba266a..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libpostproc.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libswresample.dll.a b/Externals/ffmpeg/dev/lib/libswresample.dll.a
deleted file mode 100644
index 999ec7f7ae..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libswresample.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/libswscale.dll.a b/Externals/ffmpeg/dev/lib/libswscale.dll.a
deleted file mode 100644
index 1dafa6ed2e..0000000000
Binary files a/Externals/ffmpeg/dev/lib/libswscale.dll.a and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/postproc-53.def b/Externals/ffmpeg/dev/lib/postproc-53.def
deleted file mode 100644
index 62c4c69d7f..0000000000
--- a/Externals/ffmpeg/dev/lib/postproc-53.def
+++ /dev/null
@@ -1,11 +0,0 @@
-EXPORTS
- postproc_configuration
- postproc_ffversion DATA
- postproc_license
- postproc_version
- pp_free_context
- pp_free_mode
- pp_get_context
- pp_get_mode_by_name_and_quality
- pp_help DATA
- pp_postprocess
diff --git a/Externals/ffmpeg/dev/lib/postproc.lib b/Externals/ffmpeg/dev/lib/postproc.lib
deleted file mode 100644
index 1425faa559..0000000000
Binary files a/Externals/ffmpeg/dev/lib/postproc.lib and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/swresample-1.def b/Externals/ffmpeg/dev/lib/swresample-1.def
deleted file mode 100644
index a4aedce94a..0000000000
--- a/Externals/ffmpeg/dev/lib/swresample-1.def
+++ /dev/null
@@ -1,22 +0,0 @@
-EXPORTS
- swr_alloc
- swr_alloc_set_opts
- swr_close
- swr_config_frame
- swr_convert
- swr_convert_frame
- swr_drop_output
- swr_ffversion DATA
- swr_free
- swr_get_class
- swr_get_delay
- swr_init
- swr_inject_silence
- swr_is_initialized
- swr_next_pts
- swr_set_channel_mapping
- swr_set_compensation
- swr_set_matrix
- swresample_configuration
- swresample_license
- swresample_version
diff --git a/Externals/ffmpeg/dev/lib/swresample.lib b/Externals/ffmpeg/dev/lib/swresample.lib
deleted file mode 100644
index eedf4a8c75..0000000000
Binary files a/Externals/ffmpeg/dev/lib/swresample.lib and /dev/null differ
diff --git a/Externals/ffmpeg/dev/lib/swscale-3.def b/Externals/ffmpeg/dev/lib/swscale-3.def
deleted file mode 100644
index d6330d95f2..0000000000
--- a/Externals/ffmpeg/dev/lib/swscale-3.def
+++ /dev/null
@@ -1,36 +0,0 @@
-EXPORTS
- sws_addVec
- sws_allocVec
- sws_alloc_context
- sws_cloneVec
- sws_context_class DATA
- sws_convVec
- sws_convertPalette8ToPacked24
- sws_convertPalette8ToPacked32
- sws_freeContext
- sws_freeFilter
- sws_freeVec
- sws_getCachedContext
- sws_getCoefficients
- sws_getColorspaceDetails
- sws_getConstVec
- sws_getContext
- sws_getDefaultFilter
- sws_getGaussianVec
- sws_getIdentityVec
- sws_get_class
- sws_init_context
- sws_isSupportedEndiannessConversion
- sws_isSupportedInput
- sws_isSupportedOutput
- sws_normalizeVec
- sws_printVec2
- sws_rgb2rgb_init
- sws_scale
- sws_scaleVec
- sws_setColorspaceDetails
- sws_shiftVec
- sws_subVec
- swscale_configuration
- swscale_license
- swscale_version
diff --git a/Externals/ffmpeg/shared/bin/avdevice-56.dll b/Externals/ffmpeg/shared/bin/avdevice-56.dll
deleted file mode 100644
index 147f99f79f..0000000000
Binary files a/Externals/ffmpeg/shared/bin/avdevice-56.dll and /dev/null differ
diff --git a/Externals/ffmpeg/shared/bin/ffmpeg.exe b/Externals/ffmpeg/shared/bin/ffmpeg.exe
deleted file mode 100644
index 9221b38029..0000000000
Binary files a/Externals/ffmpeg/shared/bin/ffmpeg.exe and /dev/null differ
diff --git a/Externals/ffmpeg/shared/bin/ffplay.exe b/Externals/ffmpeg/shared/bin/ffplay.exe
deleted file mode 100644
index a7072b8fc0..0000000000
Binary files a/Externals/ffmpeg/shared/bin/ffplay.exe and /dev/null differ
diff --git a/Externals/ffmpeg/shared/bin/ffprobe.exe b/Externals/ffmpeg/shared/bin/ffprobe.exe
deleted file mode 100644
index b22ed9356a..0000000000
Binary files a/Externals/ffmpeg/shared/bin/ffprobe.exe and /dev/null differ
diff --git a/Externals/ffmpeg/shared/bin/postproc-53.dll b/Externals/ffmpeg/shared/bin/postproc-53.dll
deleted file mode 100644
index cd536480cd..0000000000
Binary files a/Externals/ffmpeg/shared/bin/postproc-53.dll and /dev/null differ
diff --git a/Externals/ffmpeg/shared/doc/developer.html b/Externals/ffmpeg/shared/doc/developer.html
deleted file mode 100644
index ac67aae49a..0000000000
--- a/Externals/ffmpeg/shared/doc/developer.html
+++ /dev/null
@@ -1,777 +0,0 @@
-
-
-
-
-
-
- Developer Documentation
-
-
-
-
-
-
-
-
- Developer Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Developers Guide# TOC
-
-
-
1.1 Notes for external developers# TOC
-
-
This document is mostly useful for internal FFmpeg developers.
-External developers who need to use the API in their application should
-refer to the API doxygen documentation in the public headers, and
-check the examples in doc/examples and in the source code to
-see how the public API is employed.
-
-
You can use the FFmpeg libraries in your commercial program, but you
-are encouraged to publish any patch you make. In this case the
-best way to proceed is to send your patches to the ffmpeg-devel
-mailing list following the guidelines illustrated in the remainder of
-this document.
-
-
For more detailed legal information about the use of FFmpeg in
-external programs read the LICENSE file in the source tree and
-consult http://ffmpeg.org/legal.html.
-
-
-
1.2 Contributing# TOC
-
-
There are 3 ways by which code gets into ffmpeg.
-
- Submitting Patches to the main developer mailing list
- see Submitting patches for details.
- Directly committing changes to the main tree.
- Committing changes to a git clone, for example on github.com or
- gitorious.org. And asking us to merge these changes.
-
-
-
Whichever way, changes should be reviewed by the maintainer of the code
-before they are committed. And they should follow the Coding Rules.
-The developer making the commit and the author are responsible for their changes
-and should try to fix issues their commit causes.
-
-
-
1.3 Coding Rules# TOC
-
-
-
1.3.1 Code formatting conventions# TOC
-
-
There are the following guidelines regarding the indentation in files:
-
-
- Indent size is 4.
-
- The TAB character is forbidden outside of Makefiles as is any
-form of trailing whitespace. Commits containing either will be
-rejected by the git repository.
-
- You should try to limit your code lines to 80 characters; however, do so if
-and only if this improves readability.
-
-
The presentation is one inspired by ’indent -i4 -kr -nut’.
-
-
The main priority in FFmpeg is simplicity and small code size in order to
-minimize the bug count.
-
-
-
1.3.2 Comments# TOC
-
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
-can be generated automatically. All nontrivial functions should have a comment
-above them explaining what the function does, even if it is just one sentence.
-All structures and their member variables should be documented, too.
-
-
Avoid Qt-style and similar Doxygen syntax with !
in it, i.e. replace
-//!
with ///
and similar. Also @ syntax should be employed
-for markup commands, i.e. use @param
and not \param
.
-
-
-
/**
- * @file
- * MPEG codec.
- * @author ...
- */
-
-/**
- * Summary sentence.
- * more text ...
- * ...
- */
-typedef struct Foobar {
- int var1; /**< var1 description */
- int var2; ///< var2 description
- /** var3 description */
- int var3;
-} Foobar;
-
-/**
- * Summary sentence.
- * more text ...
- * ...
- * @param my_parameter description of my_parameter
- * @return return value description
- */
-int myfunc(int my_parameter)
-...
-
-
-
-
1.3.3 C language features# TOC
-
-
FFmpeg is programmed in the ISO C90 language with a few additional
-features from ISO C99, namely:
-
-
- the ‘inline ’ keyword;
-
- ‘// ’ comments;
-
- designated struct initializers (‘struct s x = { .i = 17 }; ’)
-
- compound literals (‘x = (struct s) { 17, 23 }; ’)
-
-
-
These features are supported by all compilers we care about, so we will not
-accept patches to remove their use unless they absolutely do not impair
-clarity and performance.
-
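As a rough sketch of how this accepted C99 subset looks in practice
(illustrative code, not taken from the FFmpeg sources):

/* Illustrative only: 'inline', '//' comments, designated initializers and
 * compound literals are the accepted C99 features listed above. */
struct point {
    int x;
    int y;
};

static inline int manhattan(struct point p)
{
    return (p.x < 0 ? -p.x : p.x) + (p.y < 0 ? -p.y : p.y); // '//' comment
}

static int demo(void)
{
    struct point a = { .x = 17, .y = 23 };              /* designated initializer */
    return manhattan((struct point) { 17, 23 }) + a.x;  /* compound literal */
}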
-
All code must compile with recent versions of GCC and a number of other
-currently supported compilers. To ensure compatibility, please do not use
-additional C99 features or GCC extensions. Especially watch out for:
-
-
- mixing statements and declarations;
-
- ‘long long ’ (use ‘int64_t ’ instead);
-
- ‘__attribute__ ’ not protected by ‘#ifdef __GNUC__ ’ or similar;
-
- GCC statement expressions (‘(x = ({ int y = 4; y; }))’).
-
-
-
-
1.3.4 Naming conventions# TOC
-
All names should be composed with underscores (_), not CamelCase. For example,
-‘avfilter_get_video_buffer ’ is an acceptable function name and
‘AVFilterGetVideo’ is not. The exception to this are type names, such as
-structs and enums; they should always be in CamelCase.
-
-
There are the following conventions for naming variables and functions:
-
-
- For local variables no prefix is required.
-
- For file-scope variables and functions declared as static, no prefix
-is required.
-
- For variables and functions visible outside of file scope, but only used
-internally by a library, an ff_ prefix should be used,
-e.g. ‘ff_w64_demuxer’.
-
- For variables and functions visible outside of file scope, used internally
-across multiple libraries, use avpriv_ as prefix, for example,
-‘avpriv_aac_parse_header’.
-
- Each library has its own prefix for public symbols, in addition to the
-commonly used av_ (avformat_ for libavformat, avcodec_ for libavcodec,
-swr_ for libswresample, etc).
-Check the existing code and choose names accordingly.
-Note that some symbols without these prefixes are also exported for
-retro-compatibility reasons. These exceptions are declared in the
-lib<name>/lib<name>.v files.
-
-
-
Furthermore, name space reserved for the system should not be invaded.
-Identifiers ending in _t are reserved by POSIX.
-Also avoid names starting with __ or _ followed by an uppercase
-letter as they are reserved by the C standard. Names starting with _
-are reserved at the file level and may not be used for externally visible
-symbols. If in doubt, just avoid names starting with _ altogether.
-
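A minimal sketch of these prefix rules, using hypothetical symbols that do not
exist in the real FFmpeg libraries:

/* Hypothetical declarations, shown only to illustrate the naming rules. */
#include <stdint.h>

typedef struct FooContext FooContext;   /* type names use CamelCase */

/* file-scope (static) helper: no prefix required */
static int parse_header_size(const uint8_t *buf, int size);

/* visible outside the file but internal to one library: ff_ prefix */
int ff_foo_parse_extradata(FooContext *ctx);

/* used internally across several libav* libraries: avpriv_ prefix */
int avpriv_foo_parse_header(const uint8_t *buf, int size);

/* public, library-specific API: the library prefix, here avcodec_ */
int avcodec_foo_configure(FooContext *ctx, int flags);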
-
-
1.3.5 Miscellaneous conventions# TOC
-
-
- fprintf and printf are forbidden in libavformat and libavcodec,
-please use av_log() instead.
-
- Casts should be used only when necessary. Unneeded parentheses
-should also be avoided if they don’t make the code easier to understand.
-
-
-
-
1.3.6 Editor configuration# TOC
-
In order to configure Vim to follow FFmpeg formatting conventions, paste
-the following snippet into your .vimrc :
-
-
" indentation rules for FFmpeg: 4 spaces, no tabs
-set expandtab
-set shiftwidth=4
-set softtabstop=4
-set cindent
-set cinoptions=(0
-" Allow tabs in Makefiles.
-autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
-" Trailing whitespace and tabs are forbidden, so highlight them.
-highlight ForbiddenWhitespace ctermbg=red guibg=red
-match ForbiddenWhitespace /\s\+$\|\t/
-" Do not highlight spaces at the end of line while typing on that line.
-autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@<!$/
-
-
-
For Emacs, add these roughly equivalent lines to your .emacs.d/init.el :
-
-
(c-add-style "ffmpeg"
- '("k&r"
- (c-basic-offset . 4)
- (indent-tabs-mode . nil)
- (show-trailing-whitespace . t)
- (c-offsets-alist
- (statement-cont . (c-lineup-assignments +)))
- )
- )
-(setq c-default-style "ffmpeg")
-
-
-
-
1.4 Development Policy# TOC
-
-
- Contributions should be licensed under the
-LGPL 2.1 ,
-including an "or any later version" clause, or, if you prefer
-a gift-style license, the
-ISC or
-MIT license.
-GPL 2 including
-an "or any later version" clause is also acceptable, but LGPL is
-preferred.
-If you add a new file, give it a proper license header. Do not copy and
-paste it from a random place, use an existing file as template.
-
- You must not commit code which breaks FFmpeg! (Meaning unfinished but
-enabled code which breaks compilation or compiles but does not work or
-breaks the regression tests)
-You can commit unfinished stuff (for testing etc), but it must be disabled
-(#ifdef etc) by default so it does not interfere with other developers’
-work.
-
- The commit message should have a short first line in the form of
-a ‘topic: short description ’ as a header, separated by a newline
-from the body consisting of an explanation of why the change is necessary.
-If the commit fixes a known bug on the bug tracker, the commit message
-should include its bug ID. Referring to the issue on the bug tracker does
-not exempt you from writing an excerpt of the bug in the commit message.
-
- You do not have to over-test things. If it works for you, and you think it
-should work for others, then commit. If your code has problems
-(portability, triggers compiler bugs, unusual environment etc) they will be
-reported and eventually fixed.
-
- Do not commit unrelated changes together, split them into self-contained
-pieces. Also do not forget that if part B depends on part A, but A does not
-depend on B, then A can and should be committed first and separate from B.
-Keeping changes well split into self-contained parts makes reviewing and
-understanding them on the commit log mailing list easier. This also helps
-in case of debugging later on.
-Also if you have doubts about splitting or not splitting, do not hesitate to
-ask/discuss it on the developer mailing list.
-
- Do not change behavior of the programs (renaming options etc) or public
-API or ABI without first discussing it on the ffmpeg-devel mailing list.
-Do not remove functionality from the code. Just improve!
-
-Note: Redundant code can be removed.
-
- Do not commit changes to the build system (Makefiles, configure script)
-which change behavior, defaults etc, without asking first. The same
-applies to compiler warning fixes, trivial looking fixes and to code
-maintained by other developers. We usually have a reason for doing things
-the way we do. Send your changes as patches to the ffmpeg-devel mailing
-list, and if the code maintainers say OK, you may commit. This does not
-apply to files you wrote and/or maintain.
-
- We refuse source indentation and other cosmetic changes if they are mixed
-with functional changes, such commits will be rejected and removed. Every
-developer has his own indentation style, you should not change it. Of course
-if you (re)write something, you can use your own style, even though we would
-prefer if the indentation throughout FFmpeg was consistent (Many projects
-force a given indentation style - we do not.). If you really need to make
-indentation changes (try to avoid this), separate them strictly from real
-changes.
-
-NOTE: If you had to put if(){ .. } over a large (> 5 lines) chunk of code,
-then either do NOT change the indentation of the inner part within (do not
-move it to the right)! or do so in a separate commit
-
- Always fill out the commit log message. Describe in a few lines what you
-changed and why. You can refer to mailing list postings if you fix a
-particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
-Recommended format:
-
-
-
area changed: Short 1 line description
-
-details describing what and why and giving references.
-
-
- Make sure the author of the commit is set correctly. (see git commit --author)
-If you apply a patch, send an
-answer to ffmpeg-devel (or wherever you got the patch from) saying that
-you applied the patch.
-
- When applying patches that have been discussed (at length) on the mailing
-list, reference the thread in the log message.
-
- Do NOT commit to code actively maintained by others without permission.
-Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
-timeframe (12h for build failures and security fixes, 3 days small changes,
-1 week for big patches) then commit your patch if you think it is OK.
-Also note, the maintainer can simply ask for more time to review!
-
- Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
-are sent there and reviewed by all the other developers. Bugs and possible
-improvements or general questions regarding commits are discussed there. We
-expect you to react if problems with your code are uncovered.
-
- Update the documentation if you change behavior or add features. If you are
-unsure how best to do this, send a patch to ffmpeg-devel, the documentation
-maintainer(s) will review and commit your stuff.
-
- Try to keep important discussions and requests (also) on the public
-developer mailing list, so that all developers can benefit from them.
-
- Never write to unallocated memory, never write over the end of arrays,
-always check values read from some untrusted source before using them
-as array index or other risky things.
-
- Remember to check if you need to bump versions for the specific libav*
-parts (libavutil, libavcodec, libavformat) you are changing. You need
-to change the version integer (a sketch of the version macros follows this list).
-Incrementing the first component means no backward compatibility to
-previous versions (e.g. removal of a function from the public API).
-Incrementing the second component means backward compatible change
-(e.g. addition of a function to the public API or extension of an
-existing data structure).
-Incrementing the third component means a noteworthy binary compatible
-change (e.g. encoder bug fix that matters for the decoder). The third
-component always starts at 100 to distinguish FFmpeg from Libav.
-
- Compiler warnings indicate potential bugs or code with bad style. If a type of
-warning always points to correct and clean code, that warning should
-be disabled, not the code changed.
-Thus the remaining warnings can either be bugs or correct code.
-If it is a bug, the bug has to be fixed. If it is not, the code should
-be changed to not generate a warning unless that causes a slowdown
-or obfuscates the code.
-
- Make sure that no parts of the codebase that you maintain are missing from the
-MAINTAINERS file. If something that you want to maintain is missing add it with
-your name after it.
-If at some point you no longer want to maintain some code, then please help
-finding a new maintainer and also don’t forget updating the MAINTAINERS file.
-
-
-
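As a concrete sketch of the version-bump rule mentioned in the list above, the
version macros in a libav* version.h header look roughly like this; the numbers
below are invented for illustration:

/* Sketch of libavcodec/version.h; the numbers are invented.
 * Removing a public function        -> bump MAJOR.
 * Adding a public function or field -> bump MINOR.
 * Noteworthy compatible change      -> bump MICRO (starts at 100 in FFmpeg). */
#define LIBAVCODEC_VERSION_MAJOR  56
#define LIBAVCODEC_VERSION_MINOR  26
#define LIBAVCODEC_VERSION_MICRO 100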
We think our rules are not too hard. If you have comments, contact us.
-
-
-
1.5 Submitting patches# TOC
-
-
First, read the Coding Rules above if you did not yet, in particular
-the rules regarding patch submission.
-
-
When you submit your patch, please use git format-patch or
-git send-email. We cannot read other diffs :-)
-
-
Also please do not submit a patch which contains several unrelated changes.
-Split it into separate, self-contained pieces. This does not mean splitting
-file by file. Instead, make the patch as small as possible while still
-keeping it as a logical unit that contains an individual change, even
-if it spans multiple files. This makes reviewing your patches much easier
-for us and greatly increases your chances of getting your patch applied.
-
-
Use the patcheck tool of FFmpeg to check your patch.
-The tool is located in the tools directory.
-
-
Run the Regression tests before submitting a patch in order to verify
-it does not cause unexpected problems.
-
-
It also helps quite a bit if you tell us what the patch does (for example
-’replaces lrint by lrintf’), and why (for example ’*BSD isn’t C99 compliant
-and has no lrint()’)
-
-
Also please if you send several patches, send each patch as a separate mail,
-do not attach several unrelated patches to the same mail.
-
-
Patches should be posted to the ffmpeg-devel mailing list.
-Use git send-email when possible since it will properly
-send patches without requiring extra care. If you cannot, then send patches
-as base64-encoded attachments, so your patch is not trashed during
-transmission.
-
-
Your patch will be reviewed on the mailing list. You will likely be asked
-to make some changes and are expected to send in an improved version that
-incorporates the requests from the review. This process may go through
-several iterations. Once your patch is deemed good enough, some developer
-will pick it up and commit it to the official FFmpeg tree.
-
-
Give us a few days to react. But if some time passes without reaction,
-send a reminder by email. Your patch should eventually be dealt with.
-
-
-
-
1.6 New codecs or formats checklist# TOC
-
-
- Did you use av_cold for codec initialization and close functions?
-
- Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
-AVInputFormat/AVOutputFormat struct? (See the sketch after this checklist.)
-
- Did you bump the minor version number (and reset the micro version
-number) in libavcodec/version.h or libavformat/version.h ?
-
- Did you register it in allcodecs.c or allformats.c ?
-
- Did you add the AVCodecID to avcodec.h ?
-When adding new codec IDs, also add an entry to the codec descriptor
-list in libavcodec/codec_desc.c .
-
- If it has a FourCC, did you add it to libavformat/riff.c ,
-even if it is only a decoder?
-
- Did you add a rule to compile the appropriate files in the Makefile?
-Remember to do this even if you’re just adding a format to a file that is
-already being compiled by some other rule, like a raw demuxer.
-
- Did you add an entry to the table of supported formats or codecs in
-doc/general.texi ?
-
- Did you add an entry in the Changelog?
-
- If it depends on a parser or a library, did you add that dependency in
-configure?
-
- Did you git add the appropriate files before committing?
-
- Did you make sure it compiles standalone, i.e. with
-configure --disable-everything --enable-decoder=foo
-(or --enable-demuxer or whatever your component is)?
-
-
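As a hedged sketch of the long_name item above, a made-up decoder entry
(heavily simplified; real entries also set init/decode/close callbacks and
capabilities) might look like:

/* Hypothetical decoder; AV_CODEC_ID_FOO does not exist and is only shown to
 * illustrate where long_name and NULL_IF_CONFIG_SMALL go. */
AVCodec ff_foo_decoder = {
    .name      = "foo",
    .long_name = NULL_IF_CONFIG_SMALL("Foo Video (hypothetical example)"),
    .type      = AVMEDIA_TYPE_VIDEO,
    .id        = AV_CODEC_ID_FOO,
};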
-
-
-
1.7 patch submission checklist# TOC
-
-
- Does make fate pass with the patch applied?
-
- Was the patch generated with git format-patch or send-email?
-
- Did you sign off your patch? (git commit -s)
-See http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches for the meaning
-of sign off.
-
- Did you provide a clear git commit log message?
-
- Is the patch against latest FFmpeg git master branch?
-
- Are you subscribed to ffmpeg-devel?
-(the list is subscribers only due to spam)
-
- Have you checked that the changes are minimal, so that the same cannot be
-achieved with a smaller patch and/or simpler final code?
-
- If the change is to speed critical code, did you benchmark it?
-
- If you did any benchmarks, did you provide them in the mail?
-
- Have you checked that the patch does not introduce buffer overflows or
-other security issues?
-
- Did you test your decoder or demuxer against damaged data? If no, see
-tools/trasher, the noise bitstream filter, and
-zzuf . Your decoder or demuxer
-should not crash, end in a (near) infinite loop, or allocate ridiculous
-amounts of memory when fed damaged data.
-
- Does the patch not mix functional and cosmetic changes?
-
- Did you add tabs or trailing whitespace to the code? Both are forbidden.
-
- Is the patch attached to the email you send?
-
- Is the mime type of the patch correct? It should be text/x-diff or
-text/x-patch or at least text/plain and not application/octet-stream.
-
- If the patch fixes a bug, did you provide a verbose analysis of the bug?
-
- If the patch fixes a bug, did you provide enough information, including
-a sample, so the bug can be reproduced and the fix can be verified?
-Note please do not attach samples >100k to mails but rather provide a
-URL, you can upload to ftp://upload.ffmpeg.org
-
- Did you provide a verbose summary about what the patch does change?
-
- Did you provide a verbose explanation why it changes things like it does?
-
- Did you provide a verbose summary of the user visible advantages and
-disadvantages if the patch is applied?
-
- Did you provide an example so we can verify the new feature added by the
-patch easily?
-
- If you added a new file, did you insert a license header? It should be
-taken from FFmpeg, not randomly copied and pasted from somewhere else.
-
- You should maintain alphabetical order in alphabetically ordered lists as
-long as doing so does not break API/ABI compatibility.
-
- Lines with similar content should be aligned vertically when doing so
-improves readability.
-
- Consider adding a regression test for your code.
-
- If you added YASM code please check that things still work with --disable-yasm.
-
- Make sure you check the return values of functions and return appropriate
-error codes. Memory allocation functions like av_malloc() in particular are
-notoriously left unchecked, which is a serious problem. (See the sketch after
-this checklist.)
-
- Test your code with valgrind and/or Address Sanitizer to ensure it’s free
-of leaks, out-of-array accesses, etc.
-
-
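A minimal sketch of the return-value checking mentioned in the checklist above
(illustrative only, not taken from the tree):

/* Illustrative helper: check allocations and propagate an AVERROR code
 * instead of continuing with a NULL pointer. */
static int alloc_scratch(uint8_t **out, size_t size)
{
    uint8_t *buf = av_malloc(size);
    if (!buf)
        return AVERROR(ENOMEM);
    *out = buf;
    return 0;
}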
-
-
1.8 Patch review process# TOC
-
-
All patches posted to ffmpeg-devel will be reviewed, unless they contain a
-clear note that the patch is not for the git master branch.
-Reviews and comments will be posted as replies to the patch on the
-mailing list. The patch submitter then has to address every comment, either
-by resubmitting a changed patch or by discussion. Resubmitted
-patches will themselves be reviewed like any other patch. If at some point
-a patch passes review with no comments then it is approved; for simple and
-small patches this can happen immediately, while large patches will generally
-have to be changed and reviewed many times before they are approved.
-After a patch is approved it will be committed to the repository.
-
-
We will review all submitted patches, but sometimes we are quite busy so
-especially for large patches this can take several weeks.
-
-
If you feel that the review process is too slow and you are willing to try to
-take over maintainership of the area of code you change then just clone
-git master and maintain the area of code there. We will merge each area from
-where it is best maintained.
-
-
When resubmitting patches, please do not make any significant changes
-not related to the comments received during review. Such patches will
-be rejected. Instead, submit significant changes or new features as
-separate patches.
-
-
-
1.9 Regression tests# TOC
-
-
Before submitting a patch (or committing to the repository), you should at least
-test that you did not break anything.
-
-
Running ’make fate’ accomplishes this; please see fate.html for details.
-
-
[Of course, some patches may change the results of the regression tests. In
-this case, the reference results of the regression tests shall be modified
-accordingly].
-
-
-
1.9.1 Adding files to the fate-suite dataset# TOC
-
-
When there is no muxer or encoder available to generate test media for a
-specific test then the media has to be included in the fate-suite.
-First please make sure that the sample file is as small as possible to test the
-respective decoder or demuxer sufficiently. Large files increase network
-bandwidth and disk space requirements.
-Once you have a working fate test and fate sample, provide in the commit
-message or introductory message for the patch series that you post to
-the ffmpeg-devel mailing list, a direct link to download the sample media.
-
-
-
-
1.9.2 Visualizing Test Coverage# TOC
-
-
The FFmpeg build system allows visualizing the test coverage in an easy
-manner with the coverage tools gcov/lcov. This involves
-the following steps:
-
-
- Configure to compile with instrumentation enabled:
- configure --toolchain=gcov.
-
- Run your test case, either manually or via FATE. This can be either
- the full FATE regression suite, or any arbitrary invocation of any
- front-end tool provided by FFmpeg, in any combination.
-
- Run make lcov to generate coverage data in HTML format.
-
- View lcov/index.html in your preferred HTML viewer.
-
-
-
You can use the command make lcov-reset to reset the coverage
-measurements. You will need to rerun make lcov after running a
-new test.
-
-
-
1.9.3 Using Valgrind# TOC
-
-
The configure script provides a shortcut for using valgrind to spot bugs
-related to memory handling. Just add the option
---toolchain=valgrind-memcheck
or --toolchain=valgrind-massif
-to your configure line, and reasonable defaults will be set for running
-FATE under the supervision of either the memcheck or the
-massif tool of the valgrind suite.
-
-
In case you need finer control over how valgrind is invoked, use the
---target-exec='valgrind <your_custom_valgrind_options>' option in
-your configure line instead.
-
-
-
1.10 Release process# TOC
-
-
FFmpeg maintains a set of release branches, which are the
-recommended deliverable for system integrators and distributors (such as
-Linux distributions, etc.). At regular times, a release
-manager prepares, tests and publishes tarballs on the
-http://ffmpeg.org website.
-
-
There are two kinds of releases:
-
-
- Major releases always include the latest and greatest
-features and functionality.
-
- Point releases are cut from release branches,
-which are named release/X, with X being the release
-version number.
-
-
-
Note that we promise to our users that shared libraries from any FFmpeg
-release never break programs that have been compiled against
-previous versions of the same release series in any case!
-
-
However, from time to time, we do make API changes that require adaptations
-in applications. Such changes are only allowed in (new) major releases and
-require further steps such as bumping library version numbers and/or
-adjustments to the symbol versioning file. Please discuss such changes
-on the ffmpeg-devel mailing list in time to allow forward planning.
-
-
-
1.10.1 Criteria for Point Releases# TOC
-
-
Changes that match the following criteria are valid candidates for
-inclusion into a point release:
-
-
- Fixes a security issue, preferably identified by a CVE
-number issued by http://cve.mitre.org/.
-
- Fixes a documented bug in https://trac.ffmpeg.org.
-
- Improves the included documentation.
-
- Retains both source code and binary compatibility with previous
-point releases of the same release branch.
-
-
-
The order for checking the rules is (1 OR 2 OR 3) AND 4.
-
-
-
-
1.10.2 Release Checklist# TOC
-
-
The release process involves the following steps:
-
-
- Ensure that the RELEASE file contains the version number for
-the upcoming release.
-
- Add the release at https://trac.ffmpeg.org/admin/ticket/versions .
-
- Announce the intent to do a release to the mailing list.
-
- Make sure all relevant security fixes have been backported. See
-https://ffmpeg.org/security.html .
-
- Ensure that the FATE regression suite still passes in the release
-branch on at least i386 and amd64
-(cf. Regression tests ).
-
- Prepare the release tarballs in bz2 and gz formats, and
-supplementing files that contain gpg signatures.
-
- Publish the tarballs at http://ffmpeg.org/releases. Create and
-push an annotated tag in the form nX, with X
-containing the version number.
-
- Propose and send a patch to the ffmpeg-devel mailing list
-with a news entry for the website.
-
- Publish the news entry.
-
- Send announcement to the mailing list.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/examples/Makefile b/Externals/ffmpeg/shared/doc/examples/Makefile
deleted file mode 100644
index 9f03f04b57..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-# use pkg-config for getting CFLAGS and LDLIBS
-FFMPEG_LIBS= libavdevice \
- libavformat \
- libavfilter \
- libavcodec \
- libswresample \
- libswscale \
- libavutil \
-
-CFLAGS += -Wall -g
-CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
-LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
-
-EXAMPLES= avio_reading \
- decoding_encoding \
- demuxing_decoding \
- extract_mvs \
- filtering_video \
- filtering_audio \
- metadata \
- muxing \
- remuxing \
- resampling_audio \
- scaling_video \
- transcode_aac \
- transcoding \
-
-OBJS=$(addsuffix .o,$(EXAMPLES))
-
-# the following examples make explicit use of the math library
-avcodec: LDLIBS += -lm
-decoding_encoding: LDLIBS += -lm
-muxing: LDLIBS += -lm
-resampling_audio: LDLIBS += -lm
-
-.phony: all clean-test clean
-
-all: $(OBJS) $(EXAMPLES)
-
-clean-test:
- $(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
-
-clean: clean-test
- $(RM) $(EXAMPLES) $(OBJS)
diff --git a/Externals/ffmpeg/shared/doc/examples/README b/Externals/ffmpeg/shared/doc/examples/README
deleted file mode 100644
index c1ce619d35..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/README
+++ /dev/null
@@ -1,23 +0,0 @@
-FFmpeg examples README
-----------------------
-
-Both of the following use cases rely on pkg-config and make, so make sure
-that you have them installed and working on your system.
-
-
-Method 1: build the installed examples in a generic read/write user directory
-
-Copy to a read/write user directory and just use "make", it will link
-to the libraries on your system, assuming the PKG_CONFIG_PATH is
-correctly configured.
-
-Method 2: build the examples in-tree
-
-Assuming you are in the source FFmpeg checkout directory, you need to build
-FFmpeg (no need to make install in any prefix). Then just run "make examples".
-This will build the examples using the FFmpeg build system. You can clean those
-examples using "make examplesclean"
-
-If you want to try the dedicated Makefile examples (to emulate the first
-method), go into doc/examples and run a command such as
-PKG_CONFIG_PATH=pc-uninstalled make.
diff --git a/Externals/ffmpeg/shared/doc/examples/avio_reading.c b/Externals/ffmpeg/shared/doc/examples/avio_reading.c
deleted file mode 100644
index 02474e907a..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/avio_reading.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2014 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * libavformat AVIOContext API example.
- *
- * Make libavformat demuxer access media content through a custom
- * AVIOContext read callback.
- * @example avio_reading.c
- */
-
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavformat/avio.h>
-#include <libavutil/file.h>
-
-struct buffer_data {
- uint8_t *ptr;
- size_t size; ///< size left in the buffer
-};
-
-static int read_packet(void *opaque, uint8_t *buf, int buf_size)
-{
- struct buffer_data *bd = (struct buffer_data *)opaque;
- buf_size = FFMIN(buf_size, bd->size);
-
- printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
-
- /* copy internal buffer data to buf */
- memcpy(buf, bd->ptr, buf_size);
- bd->ptr += buf_size;
- bd->size -= buf_size;
-
- return buf_size;
-}
-
-int main(int argc, char *argv[])
-{
- AVFormatContext *fmt_ctx = NULL;
- AVIOContext *avio_ctx = NULL;
- uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
- size_t buffer_size, avio_ctx_buffer_size = 4096;
- char *input_filename = NULL;
- int ret = 0;
- struct buffer_data bd = { 0 };
-
- if (argc != 2) {
- fprintf(stderr, "usage: %s input_file\n"
- "API example program to show how to read from a custom buffer "
- "accessed through AVIOContext.\n", argv[0]);
- return 1;
- }
- input_filename = argv[1];
-
- /* register codecs and formats and other lavf/lavc components*/
- av_register_all();
-
- /* slurp file content into buffer */
- ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
- if (ret < 0)
- goto end;
-
- /* fill opaque structure used by the AVIOContext read callback */
- bd.ptr = buffer;
- bd.size = buffer_size;
-
- if (!(fmt_ctx = avformat_alloc_context())) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
- if (!avio_ctx_buffer) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
- avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
- 0, &bd, &read_packet, NULL, NULL);
- if (!avio_ctx) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
- fmt_ctx->pb = avio_ctx;
-
- ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
- if (ret < 0) {
- fprintf(stderr, "Could not open input\n");
- goto end;
- }
-
- ret = avformat_find_stream_info(fmt_ctx, NULL);
- if (ret < 0) {
- fprintf(stderr, "Could not find stream information\n");
- goto end;
- }
-
- av_dump_format(fmt_ctx, 0, input_filename, 0);
-
-end:
- avformat_close_input(&fmt_ctx);
- /* note: the internal buffer could have changed, and be != avio_ctx_buffer */
- if (avio_ctx) {
- av_freep(&avio_ctx->buffer);
- av_freep(&avio_ctx);
- }
- av_file_unmap(buffer, buffer_size);
-
- if (ret < 0) {
- fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
- return 1;
- }
-
- return 0;
-}
diff --git a/Externals/ffmpeg/shared/doc/examples/decoding_encoding.c b/Externals/ffmpeg/shared/doc/examples/decoding_encoding.c
deleted file mode 100644
index 80da66431b..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/decoding_encoding.c
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * Copyright (c) 2001 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * libavcodec API use example.
- *
- * @example decoding_encoding.c
- * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
- * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
- * format handling
- */
-
-#include <math.h>
-
-#include <libavutil/opt.h>
-#include <libavcodec/avcodec.h>
-#include <libavutil/channel_layout.h>
-#include <libavutil/common.h>
-#include <libavutil/imgutils.h>
-#include <libavutil/mathematics.h>
-#include <libavutil/samplefmt.h>
-
-#define INBUF_SIZE 4096
-#define AUDIO_INBUF_SIZE 20480
-#define AUDIO_REFILL_THRESH 4096
-
-/* check that a given sample format is supported by the encoder */
-static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
-{
- const enum AVSampleFormat *p = codec->sample_fmts;
-
- while (*p != AV_SAMPLE_FMT_NONE) {
- if (*p == sample_fmt)
- return 1;
- p++;
- }
- return 0;
-}
-
-/* just pick the highest supported samplerate */
-static int select_sample_rate(AVCodec *codec)
-{
- const int *p;
- int best_samplerate = 0;
-
- if (!codec->supported_samplerates)
- return 44100;
-
- p = codec->supported_samplerates;
- while (*p) {
- best_samplerate = FFMAX(*p, best_samplerate);
- p++;
- }
- return best_samplerate;
-}
-
-/* select layout with the highest channel count */
-static int select_channel_layout(AVCodec *codec)
-{
- const uint64_t *p;
- uint64_t best_ch_layout = 0;
- int best_nb_channels = 0;
-
- if (!codec->channel_layouts)
- return AV_CH_LAYOUT_STEREO;
-
- p = codec->channel_layouts;
- while (*p) {
- int nb_channels = av_get_channel_layout_nb_channels(*p);
-
- if (nb_channels > best_nb_channels) {
- best_ch_layout = *p;
- best_nb_channels = nb_channels;
- }
- p++;
- }
- return best_ch_layout;
-}
-
-/*
- * Audio encoding example
- */
-static void audio_encode_example(const char *filename)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- AVFrame *frame;
- AVPacket pkt;
- int i, j, k, ret, got_output;
- int buffer_size;
- FILE *f;
- uint16_t *samples;
- float t, tincr;
-
- printf("Encode audio file %s\n", filename);
-
- /* find the MP2 encoder */
- codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate audio codec context\n");
- exit(1);
- }
-
- /* put sample parameters */
- c->bit_rate = 64000;
-
- /* check that the encoder supports s16 pcm input */
- c->sample_fmt = AV_SAMPLE_FMT_S16;
- if (!check_sample_fmt(codec, c->sample_fmt)) {
- fprintf(stderr, "Encoder does not support sample format %s",
- av_get_sample_fmt_name(c->sample_fmt));
- exit(1);
- }
-
- /* select other audio parameters supported by the encoder */
- c->sample_rate = select_sample_rate(codec);
- c->channel_layout = select_channel_layout(codec);
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "wb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
-
- /* frame containing input raw audio */
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Could not allocate audio frame\n");
- exit(1);
- }
-
- frame->nb_samples = c->frame_size;
- frame->format = c->sample_fmt;
- frame->channel_layout = c->channel_layout;
-
- /* the codec gives us the frame size, in samples,
- * we calculate the size of the samples buffer in bytes */
- buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
- c->sample_fmt, 0);
- if (buffer_size < 0) {
- fprintf(stderr, "Could not get sample buffer size\n");
- exit(1);
- }
- samples = av_malloc(buffer_size);
- if (!samples) {
- fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
- buffer_size);
- exit(1);
- }
- /* setup the data pointers in the AVFrame */
- ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- (const uint8_t*)samples, buffer_size, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not setup audio frame\n");
- exit(1);
- }
-
- /* encode a single tone sound */
- t = 0;
- tincr = 2 * M_PI * 440.0 / c->sample_rate;
- for (i = 0; i < 200; i++) {
- av_init_packet(&pkt);
- pkt.data = NULL; // packet data will be allocated by the encoder
- pkt.size = 0;
-
- for (j = 0; j < c->frame_size; j++) {
- samples[2*j] = (int)(sin(t) * 10000);
-
- for (k = 1; k < c->channels; k++)
- samples[2*j + k] = samples[2*j];
- t += tincr;
- }
- /* encode the samples */
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding audio frame\n");
- exit(1);
- }
- if (got_output) {
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
-
- /* get the delayed frames */
- for (got_output = 1; got_output; i++) {
- ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding frame\n");
- exit(1);
- }
-
- if (got_output) {
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
- fclose(f);
-
- av_freep(&samples);
- av_frame_free(&frame);
- avcodec_close(c);
- av_free(c);
-}
-
-/*
- * Audio decoding.
- */
-static void audio_decode_example(const char *outfilename, const char *filename)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int len;
- FILE *f, *outfile;
- uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
- AVPacket avpkt;
- AVFrame *decoded_frame = NULL;
-
- av_init_packet(&avpkt);
-
- printf("Decode audio file %s to %s\n", filename, outfilename);
-
- /* find the mpeg audio decoder */
- codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate audio codec context\n");
- exit(1);
- }
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "rb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
- outfile = fopen(outfilename, "wb");
- if (!outfile) {
- av_free(c);
- exit(1);
- }
-
- /* decode until eof */
- avpkt.data = inbuf;
- avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
-
- while (avpkt.size > 0) {
- int i, ch;
- int got_frame = 0;
-
- if (!decoded_frame) {
- if (!(decoded_frame = av_frame_alloc())) {
- fprintf(stderr, "Could not allocate audio frame\n");
- exit(1);
- }
- }
-
- len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
- if (len < 0) {
- fprintf(stderr, "Error while decoding\n");
- exit(1);
- }
- if (got_frame) {
- /* if a frame has been decoded, output it */
- int data_size = av_get_bytes_per_sample(c->sample_fmt);
- if (data_size < 0) {
- /* This should not occur, checking just for paranoia */
- fprintf(stderr, "Failed to calculate data size\n");
- exit(1);
- }
- for (i = 0; i < decoded_frame->nb_samples; i++)
- for (ch = 0; ch < c->channels; ch++)
- fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
- }
- avpkt.size -= len;
- avpkt.data += len;
- avpkt.dts =
- avpkt.pts = AV_NOPTS_VALUE;
- if (avpkt.size < AUDIO_REFILL_THRESH) {
- /* Refill the input buffer, to avoid trying to decode
- * incomplete frames. Instead of this, one could also use
- * a parser, or use a proper container format through
- * libavformat. */
- memmove(inbuf, avpkt.data, avpkt.size);
- avpkt.data = inbuf;
- len = fread(avpkt.data + avpkt.size, 1,
- AUDIO_INBUF_SIZE - avpkt.size, f);
- if (len > 0)
- avpkt.size += len;
- }
- }
-
- fclose(outfile);
- fclose(f);
-
- avcodec_close(c);
- av_free(c);
- av_frame_free(&decoded_frame);
-}
-
-/*
- * Video encoding example
- */
-static void video_encode_example(const char *filename, int codec_id)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int i, ret, x, y, got_output;
- FILE *f;
- AVFrame *frame;
- AVPacket pkt;
- uint8_t endcode[] = { 0, 0, 1, 0xb7 };
-
- printf("Encode video file %s\n", filename);
-
- /* find the mpeg1 video encoder */
- codec = avcodec_find_encoder(codec_id);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate video codec context\n");
- exit(1);
- }
-
- /* put sample parameters */
- c->bit_rate = 400000;
- /* resolution must be a multiple of two */
- c->width = 352;
- c->height = 288;
- /* frames per second */
- c->time_base = (AVRational){1,25};
- /* emit one intra frame every ten frames
- * check frame pict_type before passing frame
- * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
- * then gop_size is ignored and the output of encoder
- * will always be I frame irrespective to gop_size
- */
- c->gop_size = 10;
- c->max_b_frames = 1;
- c->pix_fmt = AV_PIX_FMT_YUV420P;
-
- if (codec_id == AV_CODEC_ID_H264)
- av_opt_set(c->priv_data, "preset", "slow", 0);
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "wb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
-
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
- frame->format = c->pix_fmt;
- frame->width = c->width;
- frame->height = c->height;
-
- /* the image can be allocated by any means and av_image_alloc() is
- * just the most convenient way if av_malloc() is to be used */
- ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
- c->pix_fmt, 32);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate raw picture buffer\n");
- exit(1);
- }
-
- /* encode 1 second of video */
- for (i = 0; i < 25; i++) {
- av_init_packet(&pkt);
- pkt.data = NULL; // packet data will be allocated by the encoder
- pkt.size = 0;
-
- fflush(stdout);
- /* prepare a dummy image */
- /* Y */
- for (y = 0; y < c->height; y++) {
- for (x = 0; x < c->width; x++) {
- frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
- }
- }
-
- /* Cb and Cr */
- for (y = 0; y < c->height/2; y++) {
- for (x = 0; x < c->width/2; x++) {
- frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
- frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
- }
- }
-
- frame->pts = i;
-
- /* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding frame\n");
- exit(1);
- }
-
- if (got_output) {
- printf("Write frame %3d (size=%5d)\n", i, pkt.size);
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
-
- /* get the delayed frames */
- for (got_output = 1; got_output; i++) {
- fflush(stdout);
-
- ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding frame\n");
- exit(1);
- }
-
- if (got_output) {
- printf("Write frame %3d (size=%5d)\n", i, pkt.size);
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
-
- /* add sequence end code to have a real mpeg file */
- fwrite(endcode, 1, sizeof(endcode), f);
- fclose(f);
-
- avcodec_close(c);
- av_free(c);
- av_freep(&frame->data[0]);
- av_frame_free(&frame);
- printf("\n");
-}
-
-/*
- * Video decoding example
- */
-
-static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
- char *filename)
-{
- FILE *f;
- int i;
-
- f = fopen(filename,"w");
- fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
- for (i = 0; i < ysize; i++)
- fwrite(buf + i * wrap, 1, xsize, f);
- fclose(f);
-}
-
-static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
- AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
-{
- int len, got_frame;
- char buf[1024];
-
- len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
- if (len < 0) {
- fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
- return len;
- }
- if (got_frame) {
- printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
- fflush(stdout);
-
- /* the picture is allocated by the decoder, no need to free it */
- snprintf(buf, sizeof(buf), outfilename, *frame_count);
- pgm_save(frame->data[0], frame->linesize[0],
- avctx->width, avctx->height, buf);
- (*frame_count)++;
- }
- if (pkt->data) {
- pkt->size -= len;
- pkt->data += len;
- }
- return 0;
-}
-
-static void video_decode_example(const char *outfilename, const char *filename)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int frame_count;
- FILE *f;
- AVFrame *frame;
- uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
- AVPacket avpkt;
-
- av_init_packet(&avpkt);
-
- /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
- memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
- printf("Decode video file %s to %s\n", filename, outfilename);
-
- /* find the mpeg1 video decoder */
- codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate video codec context\n");
- exit(1);
- }
-
- if(codec->capabilities&CODEC_CAP_TRUNCATED)
- c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
-
- /* For some codecs, such as msmpeg4 and mpeg4, width and height
- MUST be initialized there because this information is not
- available in the bitstream. */
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "rb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
-
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
-
- frame_count = 0;
- for (;;) {
- avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
- if (avpkt.size == 0)
- break;
-
- /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
- and this is the only method to use them because you cannot
- know the compressed data size before analysing it.
-
- BUT some other codecs (msmpeg4, mpeg4) are inherently frame
- based, so you must call them with all the data for one
- frame exactly. You must also initialize 'width' and
- 'height' before initializing them. */
-
- /* NOTE2: some codecs allow the raw parameters (frame size,
- sample rate) to be changed at any frame. We handle this, so
- you should also take care of it */
-
- /* here, we use a stream based decoder (mpeg1video), so we
- feed decoder and see if it could decode a frame */
- avpkt.data = inbuf;
- while (avpkt.size > 0)
- if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
- exit(1);
- }
-
- /* some codecs, such as MPEG, transmit the I and P frame with a
- latency of one frame. You must do the following to have a
- chance to get the last frame of the video */
- avpkt.data = NULL;
- avpkt.size = 0;
- decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
-
- fclose(f);
-
- avcodec_close(c);
- av_free(c);
- av_frame_free(&frame);
- printf("\n");
-}
-
-int main(int argc, char **argv)
-{
- const char *output_type;
-
- /* register all the codecs */
- avcodec_register_all();
-
- if (argc < 2) {
- printf("usage: %s output_type\n"
- "API example program to decode/encode a media stream with libavcodec.\n"
- "This program generates a synthetic stream and encodes it to a file\n"
- "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
- "The encoded stream is then decoded and written to a raw data output.\n"
- "output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
- argv[0]);
- return 1;
- }
- output_type = argv[1];
-
- if (!strcmp(output_type, "h264")) {
- video_encode_example("test.h264", AV_CODEC_ID_H264);
- } else if (!strcmp(output_type, "mp2")) {
- audio_encode_example("test.mp2");
- audio_decode_example("test.pcm", "test.mp2");
- } else if (!strcmp(output_type, "mpg")) {
- video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
- video_decode_example("test%02d.pgm", "test.mpg");
- } else {
- fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
- output_type);
- return 1;
- }
-
- return 0;
-}
diff --git a/Externals/ffmpeg/shared/doc/examples/demuxing_decoding.c b/Externals/ffmpeg/shared/doc/examples/demuxing_decoding.c
deleted file mode 100644
index 2ce4018c79..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/demuxing_decoding.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright (c) 2012 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * Demuxing and decoding example.
- *
- * Show how to use the libavformat and libavcodec API to demux and
- * decode audio and video data.
- * @example demuxing_decoding.c
- */
-
-#include <libavutil/imgutils.h>
-#include <libavutil/samplefmt.h>
-#include <libavutil/timestamp.h>
-#include <libavformat/avformat.h>
-
-static AVFormatContext *fmt_ctx = NULL;
-static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
-static AVStream *video_stream = NULL, *audio_stream = NULL;
-static const char *src_filename = NULL;
-static const char *video_dst_filename = NULL;
-static const char *audio_dst_filename = NULL;
-static FILE *video_dst_file = NULL;
-static FILE *audio_dst_file = NULL;
-
-static uint8_t *video_dst_data[4] = {NULL};
-static int video_dst_linesize[4];
-static int video_dst_bufsize;
-
-static int video_stream_idx = -1, audio_stream_idx = -1;
-static AVFrame *frame = NULL;
-static AVPacket pkt;
-static int video_frame_count = 0;
-static int audio_frame_count = 0;
-
-/* The different ways of decoding and managing data memory. You are not
- * supposed to support all the modes in your application but pick the one most
- * appropriate to your needs. Look for the use of api_mode in this example to
- * see what are the differences of API usage between them */
-enum {
- API_MODE_OLD = 0, /* old method, deprecated */
- API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
- API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
-};
-
-static int api_mode = API_MODE_OLD;
-
-static int decode_packet(int *got_frame, int cached)
-{
- int ret = 0;
- int decoded = pkt.size;
-
- *got_frame = 0;
-
- if (pkt.stream_index == video_stream_idx) {
- /* decode video frame */
- ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
- return ret;
- }
-
- if (*got_frame) {
- printf("video_frame%s n:%d coded_n:%d pts:%s\n",
- cached ? "(cached)" : "",
- video_frame_count++, frame->coded_picture_number,
- av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
-
- /* copy decoded frame to destination buffer:
- * this is required since rawvideo expects non aligned data */
- av_image_copy(video_dst_data, video_dst_linesize,
- (const uint8_t **)(frame->data), frame->linesize,
- video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
-
- /* write to rawvideo file */
- fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
- }
- } else if (pkt.stream_index == audio_stream_idx) {
- /* decode audio frame */
- ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
- return ret;
- }
- /* Some audio decoders decode only part of the packet, and have to be
- * called again with the remainder of the packet data.
- * Sample: fate-suite/lossless-audio/luckynight-partial.shn
- * Also, some decoders might over-read the packet. */
- decoded = FFMIN(ret, pkt.size);
-
- if (*got_frame) {
- size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
- printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
- cached ? "(cached)" : "",
- audio_frame_count++, frame->nb_samples,
- av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
-
- /* Write the raw audio data samples of the first plane. This works
- * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
- * most audio decoders output planar audio, which uses a separate
- * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
- * In other words, this code will write only the first audio channel
- * in these cases.
- * You should use libswresample or libavfilter to convert the frame
- * to packed data. */
- fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
- }
- }
-
- /* If we use the new API with reference counting, we own the data and need
- * to de-reference it when we don't use it anymore */
- if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
- av_frame_unref(frame);
-
- return decoded;
-}
-
-static int open_codec_context(int *stream_idx,
- AVFormatContext *fmt_ctx, enum AVMediaType type)
-{
- int ret;
- AVStream *st;
- AVCodecContext *dec_ctx = NULL;
- AVCodec *dec = NULL;
- AVDictionary *opts = NULL;
-
- ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not find %s stream in input file '%s'\n",
- av_get_media_type_string(type), src_filename);
- return ret;
- } else {
- *stream_idx = ret;
- st = fmt_ctx->streams[*stream_idx];
-
- /* find decoder for the stream */
- dec_ctx = st->codec;
- dec = avcodec_find_decoder(dec_ctx->codec_id);
- if (!dec) {
- fprintf(stderr, "Failed to find %s codec\n",
- av_get_media_type_string(type));
- return AVERROR(EINVAL);
- }
-
- /* Init the decoders, with or without reference counting */
- if (api_mode == API_MODE_NEW_API_REF_COUNT)
- av_dict_set(&opts, "refcounted_frames", "1", 0);
- if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
- fprintf(stderr, "Failed to open %s codec\n",
- av_get_media_type_string(type));
- return ret;
- }
- }
-
- return 0;
-}
-
-static int get_format_from_sample_fmt(const char **fmt,
- enum AVSampleFormat sample_fmt)
-{
- int i;
- struct sample_fmt_entry {
- enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
- } sample_fmt_entries[] = {
- { AV_SAMPLE_FMT_U8, "u8", "u8" },
- { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
- { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
- { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
- { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
- };
- *fmt = NULL;
-
- for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
- struct sample_fmt_entry *entry = &sample_fmt_entries[i];
- if (sample_fmt == entry->sample_fmt) {
- *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
- return 0;
- }
- }
-
- fprintf(stderr,
- "sample format %s is not supported as output format\n",
- av_get_sample_fmt_name(sample_fmt));
- return -1;
-}
-
-int main (int argc, char **argv)
-{
- int ret = 0, got_frame;
-
- if (argc != 4 && argc != 5) {
- fprintf(stderr, "usage: %s [-refcount=] "
- "input_file video_output_file audio_output_file\n"
- "API example program to show how to read frames from an input file.\n"
- "This program reads frames from a file, decodes them, and writes decoded\n"
- "video frames to a rawvideo file named video_output_file, and decoded\n"
- "audio frames to a rawaudio file named audio_output_file.\n\n"
- "If the -refcount option is specified, the program use the\n"
- "reference counting frame system which allows keeping a copy of\n"
- "the data for longer than one decode call. If unset, it's using\n"
- "the classic old method.\n"
- "\n", argv[0]);
- exit(1);
- }
- if (argc == 5) {
- const char *mode = argv[1] + strlen("-refcount=");
- if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
- else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
- else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
- else {
- fprintf(stderr, "unknow mode '%s'\n", mode);
- exit(1);
- }
- argv++;
- }
- src_filename = argv[1];
- video_dst_filename = argv[2];
- audio_dst_filename = argv[3];
-
- /* register all formats and codecs */
- av_register_all();
-
- /* open input file, and allocate format context */
- if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
- fprintf(stderr, "Could not open source file %s\n", src_filename);
- exit(1);
- }
-
- /* retrieve stream information */
- if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
- fprintf(stderr, "Could not find stream information\n");
- exit(1);
- }
-
- if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
- video_stream = fmt_ctx->streams[video_stream_idx];
- video_dec_ctx = video_stream->codec;
-
- video_dst_file = fopen(video_dst_filename, "wb");
- if (!video_dst_file) {
- fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
- ret = 1;
- goto end;
- }
-
- /* allocate image where the decoded image will be put */
- ret = av_image_alloc(video_dst_data, video_dst_linesize,
- video_dec_ctx->width, video_dec_ctx->height,
- video_dec_ctx->pix_fmt, 1);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate raw video buffer\n");
- goto end;
- }
- video_dst_bufsize = ret;
- }
-
- if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
- audio_stream = fmt_ctx->streams[audio_stream_idx];
- audio_dec_ctx = audio_stream->codec;
- audio_dst_file = fopen(audio_dst_filename, "wb");
- if (!audio_dst_file) {
- fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
- ret = 1;
- goto end;
- }
- }
-
- /* dump input information to stderr */
- av_dump_format(fmt_ctx, 0, src_filename, 0);
-
- if (!audio_stream && !video_stream) {
- fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
- ret = 1;
- goto end;
- }
-
- /* When using the new API, you need to use the libavutil/frame.h API, while
- * the classic frame management is available in libavcodec */
- if (api_mode == API_MODE_OLD)
- frame = avcodec_alloc_frame();
- else
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Could not allocate frame\n");
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- /* initialize packet, set data to NULL, let the demuxer fill it */
- av_init_packet(&pkt);
- pkt.data = NULL;
- pkt.size = 0;
-
- if (video_stream)
- printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
- if (audio_stream)
- printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
-
- /* read frames from the file */
- while (av_read_frame(fmt_ctx, &pkt) >= 0) {
- AVPacket orig_pkt = pkt;
- do {
- ret = decode_packet(&got_frame, 0);
- if (ret < 0)
- break;
- pkt.data += ret;
- pkt.size -= ret;
- } while (pkt.size > 0);
- av_free_packet(&orig_pkt);
- }
-
- /* flush cached frames */
- pkt.data = NULL;
- pkt.size = 0;
- do {
- decode_packet(&got_frame, 1);
- } while (got_frame);
-
- printf("Demuxing succeeded.\n");
-
- if (video_stream) {
- printf("Play the output video file with the command:\n"
- "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
- av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
- video_dst_filename);
- }
-
- if (audio_stream) {
- enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
- int n_channels = audio_dec_ctx->channels;
- const char *fmt;
-
- if (av_sample_fmt_is_planar(sfmt)) {
- const char *packed = av_get_sample_fmt_name(sfmt);
- printf("Warning: the sample format the decoder produced is planar "
- "(%s). This example will output the first channel only.\n",
- packed ? packed : "?");
- sfmt = av_get_packed_sample_fmt(sfmt);
- n_channels = 1;
- }
-
- if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
- goto end;
-
- printf("Play the output audio file with the command:\n"
- "ffplay -f %s -ac %d -ar %d %s\n",
- fmt, n_channels, audio_dec_ctx->sample_rate,
- audio_dst_filename);
- }
-
-end:
- avcodec_close(video_dec_ctx);
- avcodec_close(audio_dec_ctx);
- avformat_close_input(&fmt_ctx);
- if (video_dst_file)
- fclose(video_dst_file);
- if (audio_dst_file)
- fclose(audio_dst_file);
- if (api_mode == API_MODE_OLD)
- avcodec_free_frame(&frame);
- else
- av_frame_free(&frame);
- av_free(video_dst_data[0]);
-
- return ret < 0;
-}
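Note: the demuxing/decoding example removed above drives its decoders through the old avcodec_decode_video2()/avcodec_decode_audio4() calls. For reference only, here is a minimal hedged sketch of the send/receive loop introduced in later FFmpeg releases (3.1+, i.e. newer than the snapshot deleted here); decode_with_new_api() and on_frame() are illustrative names, not part of the deleted file.

/* Sketch only: assumes FFmpeg >= 3.1, which provides avcodec_send_packet()
 * and avcodec_receive_frame() as the replacement for the decode_* calls. */
#include <libavcodec/avcodec.h>

static int decode_with_new_api(AVCodecContext *dec_ctx, const AVPacket *pkt,
                               AVFrame *frame, int (*on_frame)(AVFrame *))
{
    /* Feed one packet to the decoder; a NULL packet starts flushing. */
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    /* Drain every frame the decoder can produce for this input. */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* needs more input, or fully flushed */
        if (ret < 0)
            return ret; /* decoding error */
        ret = on_frame(frame); /* hand the decoded frame to the caller */
        av_frame_unref(frame);
    }
    return ret;
}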
diff --git a/Externals/ffmpeg/shared/doc/examples/extract_mvs.c b/Externals/ffmpeg/shared/doc/examples/extract_mvs.c
deleted file mode 100644
index d6fd61335e..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/extract_mvs.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2012 Stefano Sabatini
- * Copyright (c) 2014 Clément Bœsch
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include <libavutil/motion_vector.h>
-#include <libavformat/avformat.h>
-
-static AVFormatContext *fmt_ctx = NULL;
-static AVCodecContext *video_dec_ctx = NULL;
-static AVStream *video_stream = NULL;
-static const char *src_filename = NULL;
-
-static int video_stream_idx = -1;
-static AVFrame *frame = NULL;
-static AVPacket pkt;
-static int video_frame_count = 0;
-
-static int decode_packet(int *got_frame, int cached)
-{
- int decoded = pkt.size;
-
- *got_frame = 0;
-
- if (pkt.stream_index == video_stream_idx) {
- int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
- return ret;
- }
-
- if (*got_frame) {
- int i;
- AVFrameSideData *sd;
-
- video_frame_count++;
- sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
- if (sd) {
- const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
- for (i = 0; i < sd->size / sizeof(*mvs); i++) {
- const AVMotionVector *mv = &mvs[i];
- printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
- video_frame_count, mv->source,
- mv->w, mv->h, mv->src_x, mv->src_y,
- mv->dst_x, mv->dst_y, mv->flags);
- }
- }
- }
- }
-
- return decoded;
-}
-
-static int open_codec_context(int *stream_idx,
- AVFormatContext *fmt_ctx, enum AVMediaType type)
-{
- int ret;
- AVStream *st;
- AVCodecContext *dec_ctx = NULL;
- AVCodec *dec = NULL;
- AVDictionary *opts = NULL;
-
- ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not find %s stream in input file '%s'\n",
- av_get_media_type_string(type), src_filename);
- return ret;
- } else {
- *stream_idx = ret;
- st = fmt_ctx->streams[*stream_idx];
-
- /* find decoder for the stream */
- dec_ctx = st->codec;
- dec = avcodec_find_decoder(dec_ctx->codec_id);
- if (!dec) {
- fprintf(stderr, "Failed to find %s codec\n",
- av_get_media_type_string(type));
- return AVERROR(EINVAL);
- }
-
- /* Init the video decoder */
- av_dict_set(&opts, "flags2", "+export_mvs", 0);
- if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
- fprintf(stderr, "Failed to open %s codec\n",
- av_get_media_type_string(type));
- return ret;
- }
- }
-
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- int ret = 0, got_frame;
-
- if (argc != 2) {
- fprintf(stderr, "Usage: %s <video>\n", argv[0]);
- exit(1);
- }
- src_filename = argv[1];
-
- av_register_all();
-
- if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
- fprintf(stderr, "Could not open source file %s\n", src_filename);
- exit(1);
- }
-
- if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
- fprintf(stderr, "Could not find stream information\n");
- exit(1);
- }
-
- if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
- video_stream = fmt_ctx->streams[video_stream_idx];
- video_dec_ctx = video_stream->codec;
- }
-
- av_dump_format(fmt_ctx, 0, src_filename, 0);
-
- if (!video_stream) {
- fprintf(stderr, "Could not find video stream in the input, aborting\n");
- ret = 1;
- goto end;
- }
-
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Could not allocate frame\n");
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
-
- /* initialize packet, set data to NULL, let the demuxer fill it */
- av_init_packet(&pkt);
- pkt.data = NULL;
- pkt.size = 0;
-
- /* read frames from the file */
- while (av_read_frame(fmt_ctx, &pkt) >= 0) {
- AVPacket orig_pkt = pkt;
- do {
- ret = decode_packet(&got_frame, 0);
- if (ret < 0)
- break;
- pkt.data += ret;
- pkt.size -= ret;
- } while (pkt.size > 0);
- av_free_packet(&orig_pkt);
- }
-
- /* flush cached frames */
- pkt.data = NULL;
- pkt.size = 0;
- do {
- decode_packet(&got_frame, 1);
- } while (got_frame);
-
-end:
- avcodec_close(video_dec_ctx);
- avformat_close_input(&fmt_ctx);
- av_frame_free(&frame);
- return ret < 0;
-}
diff --git a/Externals/ffmpeg/shared/doc/examples/filter_audio.c b/Externals/ffmpeg/shared/doc/examples/filter_audio.c
deleted file mode 100644
index 01761dcee4..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/filter_audio.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * copyright (c) 2013 Andrew Kelley
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * libavfilter API usage example.
- *
- * @example filter_audio.c
- * This example will generate a sine wave audio,
- * pass it through a simple filter chain, and then compute the MD5 checksum of
- * the output data.
- *
- * The filter chain it uses is:
- * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
- *
- * abuffer: This provides the endpoint where you can feed the decoded samples.
- * volume: In this example we hardcode it to 0.90.
- * aformat: This converts the samples to the samplefreq, channel layout,
- * and sample format required by the audio device.
- * abuffersink: This provides the endpoint where you can read the samples after
- * they have passed through the filter chain.
- */
-
-#include <inttypes.h>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "libavutil/channel_layout.h"
-#include "libavutil/md5.h"
-#include "libavutil/mem.h"
-#include "libavutil/opt.h"
-#include "libavutil/samplefmt.h"
-
-#include "libavfilter/avfilter.h"
-#include "libavfilter/buffersink.h"
-#include "libavfilter/buffersrc.h"
-
-#define INPUT_SAMPLERATE 48000
-#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
-#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
-
-#define VOLUME_VAL 0.90
-
-static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
- AVFilterContext **sink)
-{
- AVFilterGraph *filter_graph;
- AVFilterContext *abuffer_ctx;
- AVFilter *abuffer;
- AVFilterContext *volume_ctx;
- AVFilter *volume;
- AVFilterContext *aformat_ctx;
- AVFilter *aformat;
- AVFilterContext *abuffersink_ctx;
- AVFilter *abuffersink;
-
- AVDictionary *options_dict = NULL;
- uint8_t options_str[1024];
- uint8_t ch_layout[64];
-
- int err;
-
- /* Create a new filtergraph, which will contain all the filters. */
- filter_graph = avfilter_graph_alloc();
- if (!filter_graph) {
- fprintf(stderr, "Unable to create filter graph.\n");
- return AVERROR(ENOMEM);
- }
-
- /* Create the abuffer filter;
- * it will be used for feeding the data into the graph. */
- abuffer = avfilter_get_by_name("abuffer");
- if (!abuffer) {
- fprintf(stderr, "Could not find the abuffer filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
- }
-
- abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
- if (!abuffer_ctx) {
- fprintf(stderr, "Could not allocate the abuffer instance.\n");
- return AVERROR(ENOMEM);
- }
-
- /* Set the filter options through the AVOptions API. */
- av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
- av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
- av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
- av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
- av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
-
- /* Now initialize the filter; we pass NULL options, since we have already
- * set all the options above. */
- err = avfilter_init_str(abuffer_ctx, NULL);
- if (err < 0) {
- fprintf(stderr, "Could not initialize the abuffer filter.\n");
- return err;
- }
-
- /* Create volume filter. */
- volume = avfilter_get_by_name("volume");
- if (!volume) {
- fprintf(stderr, "Could not find the volume filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
- }
-
- volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
- if (!volume_ctx) {
- fprintf(stderr, "Could not allocate the volume instance.\n");
- return AVERROR(ENOMEM);
- }
-
- /* A different way of passing the options is as key/value pairs in a
- * dictionary. */
- av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
- err = avfilter_init_dict(volume_ctx, &options_dict);
- av_dict_free(&options_dict);
- if (err < 0) {
- fprintf(stderr, "Could not initialize the volume filter.\n");
- return err;
- }
-
- /* Create the aformat filter;
- * it ensures that the output is of the format we want. */
- aformat = avfilter_get_by_name("aformat");
- if (!aformat) {
- fprintf(stderr, "Could not find the aformat filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
- }
-
- aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
- if (!aformat_ctx) {
- fprintf(stderr, "Could not allocate the aformat instance.\n");
- return AVERROR(ENOMEM);
- }
-
- /* A third way of passing the options is in a string of the form
- * key1=value1:key2=value2.... */
- snprintf(options_str, sizeof(options_str),
- "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
- av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
- (uint64_t)AV_CH_LAYOUT_STEREO);
- err = avfilter_init_str(aformat_ctx, options_str);
- if (err < 0) {
- av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
- return err;
- }
-
- /* Finally create the abuffersink filter;
- * it will be used to get the filtered data out of the graph. */
- abuffersink = avfilter_get_by_name("abuffersink");
- if (!abuffersink) {
- fprintf(stderr, "Could not find the abuffersink filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
- }
-
- abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
- if (!abuffersink_ctx) {
- fprintf(stderr, "Could not allocate the abuffersink instance.\n");
- return AVERROR(ENOMEM);
- }
-
- /* This filter takes no options. */
- err = avfilter_init_str(abuffersink_ctx, NULL);
- if (err < 0) {
- fprintf(stderr, "Could not initialize the abuffersink instance.\n");
- return err;
- }
-
- /* Connect the filters;
- * in this simple case the filters just form a linear chain. */
- err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
- if (err >= 0)
- err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
- if (err >= 0)
- err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
- if (err < 0) {
- fprintf(stderr, "Error connecting filters\n");
- return err;
- }
-
- /* Configure the graph. */
- err = avfilter_graph_config(filter_graph, NULL);
- if (err < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
- return err;
- }
-
- *graph = filter_graph;
- *src = abuffer_ctx;
- *sink = abuffersink_ctx;
-
- return 0;
-}
-
-/* Do something useful with the filtered data: this simple
- * example just prints the MD5 checksum of each plane to stdout. */
-static int process_output(struct AVMD5 *md5, AVFrame *frame)
-{
- int planar = av_sample_fmt_is_planar(frame->format);
- int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
- int planes = planar ? channels : 1;
- int bps = av_get_bytes_per_sample(frame->format);
- int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
- int i, j;
-
- for (i = 0; i < planes; i++) {
- uint8_t checksum[16];
-
- av_md5_init(md5);
- av_md5_sum(checksum, frame->extended_data[i], plane_size);
-
- fprintf(stdout, "plane %d: 0x", i);
- for (j = 0; j < sizeof(checksum); j++)
- fprintf(stdout, "%02X", checksum[j]);
- fprintf(stdout, "\n");
- }
- fprintf(stdout, "\n");
-
- return 0;
-}
-
-/* Construct a frame of audio data to be filtered;
- * this simple example just synthesizes a sine wave. */
-static int get_input(AVFrame *frame, int frame_num)
-{
- int err, i, j;
-
-#define FRAME_SIZE 1024
-
- /* Set up the frame properties and allocate the buffer for the data. */
- frame->sample_rate = INPUT_SAMPLERATE;
- frame->format = INPUT_FORMAT;
- frame->channel_layout = INPUT_CHANNEL_LAYOUT;
- frame->nb_samples = FRAME_SIZE;
- frame->pts = frame_num * FRAME_SIZE;
-
- err = av_frame_get_buffer(frame, 0);
- if (err < 0)
- return err;
-
- /* Fill the data for each channel. */
- for (i = 0; i < 5; i++) {
- float *data = (float*)frame->extended_data[i];
-
- for (j = 0; j < frame->nb_samples; j++)
- data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
- }
-
- return 0;
-}
-
-int main(int argc, char *argv[])
-{
- struct AVMD5 *md5;
- AVFilterGraph *graph;
- AVFilterContext *src, *sink;
- AVFrame *frame;
- uint8_t errstr[1024];
- float duration;
- int err, nb_frames, i;
-
- if (argc < 2) {
- fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
- return 1;
- }
-
- duration = atof(argv[1]);
- nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
- if (nb_frames <= 0) {
- fprintf(stderr, "Invalid duration: %s\n", argv[1]);
- return 1;
- }
-
- avfilter_register_all();
-
- /* Allocate the frame we will be using to store the data. */
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Error allocating the frame\n");
- return 1;
- }
-
- md5 = av_md5_alloc();
- if (!md5) {
- fprintf(stderr, "Error allocating the MD5 context\n");
- return 1;
- }
-
- /* Set up the filtergraph. */
- err = init_filter_graph(&graph, &src, &sink);
- if (err < 0) {
- fprintf(stderr, "Unable to init filter graph:");
- goto fail;
- }
-
- /* the main filtering loop */
- for (i = 0; i < nb_frames; i++) {
- /* get an input frame to be filtered */
- err = get_input(frame, i);
- if (err < 0) {
- fprintf(stderr, "Error generating input frame:");
- goto fail;
- }
-
- /* Send the frame to the input of the filtergraph. */
- err = av_buffersrc_add_frame(src, frame);
- if (err < 0) {
- av_frame_unref(frame);
- fprintf(stderr, "Error submitting the frame to the filtergraph:");
- goto fail;
- }
-
- /* Get all the filtered output that is available. */
- while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
- /* now do something with our filtered frame */
- err = process_output(md5, frame);
- if (err < 0) {
- fprintf(stderr, "Error processing the filtered frame:");
- goto fail;
- }
- av_frame_unref(frame);
- }
-
- if (err == AVERROR(EAGAIN)) {
- /* Need to feed more frames in. */
- continue;
- } else if (err == AVERROR_EOF) {
- /* Nothing more to do, finish. */
- break;
- } else if (err < 0) {
- /* An error occurred. */
- fprintf(stderr, "Error filtering the data:");
- goto fail;
- }
- }
-
- avfilter_graph_free(&graph);
- av_frame_free(&frame);
- av_freep(&md5);
-
- return 0;
-
-fail:
- av_strerror(err, errstr, sizeof(errstr));
- fprintf(stderr, "%s\n", errstr);
- return 1;
-}
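A note on the example above: it stops after the last generated frame without signalling end of stream to the graph. Below is a hedged companion sketch (reusing the example's own headers and its process_output() helper; flush_filter_graph() is an illustrative name) of how the graph could be drained: passing a NULL frame to av_buffersrc_add_frame() marks EOF, after which the sink eventually returns AVERROR_EOF.

/* Sketch only: drain the filtergraph once no more input will be fed. */
static int flush_filter_graph(AVFilterContext *src, AVFilterContext *sink,
                              AVFrame *frame, struct AVMD5 *md5)
{
    int err = av_buffersrc_add_frame(src, NULL); /* NULL frame == EOF */
    if (err < 0)
        return err;

    /* Read whatever the filters still hold until the sink reports EOF. */
    while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
        process_output(md5, frame);
        av_frame_unref(frame);
    }
    return err == AVERROR_EOF ? 0 : err;
}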
diff --git a/Externals/ffmpeg/shared/doc/examples/filtering_audio.c b/Externals/ffmpeg/shared/doc/examples/filtering_audio.c
deleted file mode 100644
index 46595fb3b8..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/filtering_audio.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright (c) 2010 Nicolas George
- * Copyright (c) 2011 Stefano Sabatini
- * Copyright (c) 2012 Clément Bœsch
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * API example for audio decoding and filtering
- * @example filtering_audio.c
- */
-
-#include <unistd.h>
-
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavfilter/avfiltergraph.h>
-#include <libavfilter/buffersink.h>
-#include <libavfilter/buffersrc.h>
-#include <libavutil/channel_layout.h>
-#include <libavutil/opt.h>
-
-static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
-static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
-
-static AVFormatContext *fmt_ctx;
-static AVCodecContext *dec_ctx;
-AVFilterContext *buffersink_ctx;
-AVFilterContext *buffersrc_ctx;
-AVFilterGraph *filter_graph;
-static int audio_stream_index = -1;
-
-static int open_input_file(const char *filename)
-{
- int ret;
- AVCodec *dec;
-
- if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
- return ret;
- }
-
- if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
- return ret;
- }
-
- /* select the audio stream */
- ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
- return ret;
- }
- audio_stream_index = ret;
- dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
- av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
-
- /* init the audio decoder */
- if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
- return ret;
- }
-
- return 0;
-}
-
-static int init_filters(const char *filters_descr)
-{
- char args[512];
- int ret = 0;
- AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
- AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
- AVFilterInOut *outputs = avfilter_inout_alloc();
- AVFilterInOut *inputs = avfilter_inout_alloc();
- static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
- static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
- static const int out_sample_rates[] = { 8000, -1 };
- const AVFilterLink *outlink;
- AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
-
- filter_graph = avfilter_graph_alloc();
- if (!outputs || !inputs || !filter_graph) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- /* buffer audio source: the decoded frames from the decoder will be inserted here. */
- if (!dec_ctx->channel_layout)
- dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
- snprintf(args, sizeof(args),
- "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
- time_base.num, time_base.den, dec_ctx->sample_rate,
- av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
- ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
- args, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
- goto end;
- }
-
- /* buffer audio sink: to terminate the filter chain. */
- ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
- NULL, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
- goto end;
- }
-
- ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
- AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
- goto end;
- }
-
- ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
- AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
- goto end;
- }
-
- ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
- AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
- goto end;
- }
-
- /* Endpoints for the filter graph. */
- outputs->name = av_strdup("in");
- outputs->filter_ctx = buffersrc_ctx;
- outputs->pad_idx = 0;
- outputs->next = NULL;
-
- inputs->name = av_strdup("out");
- inputs->filter_ctx = buffersink_ctx;
- inputs->pad_idx = 0;
- inputs->next = NULL;
-
- if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
- &inputs, &outputs, NULL)) < 0)
- goto end;
-
- if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
- goto end;
-
- /* Print summary of the sink buffer
- * Note: args buffer is reused to store channel layout string */
- outlink = buffersink_ctx->inputs[0];
- av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
- av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
- (int)outlink->sample_rate,
- (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
- args);
-
-end:
- avfilter_inout_free(&inputs);
- avfilter_inout_free(&outputs);
-
- return ret;
-}
-
-static void print_frame(const AVFrame *frame)
-{
- const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
- const uint16_t *p = (uint16_t*)frame->data[0];
- const uint16_t *p_end = p + n;
-
- while (p < p_end) {
- fputc(*p & 0xff, stdout);
- fputc(*p>>8 & 0xff, stdout);
- p++;
- }
- fflush(stdout);
-}
-
-int main(int argc, char **argv)
-{
- int ret;
- AVPacket packet0, packet;
- AVFrame *frame = av_frame_alloc();
- AVFrame *filt_frame = av_frame_alloc();
- int got_frame;
-
- if (!frame || !filt_frame) {
- perror("Could not allocate frame");
- exit(1);
- }
- if (argc != 2) {
- fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
- exit(1);
- }
-
- av_register_all();
- avfilter_register_all();
-
- if ((ret = open_input_file(argv[1])) < 0)
- goto end;
- if ((ret = init_filters(filter_descr)) < 0)
- goto end;
-
- /* read all packets */
- packet0.data = NULL;
- packet.data = NULL;
- while (1) {
- if (!packet0.data) {
- if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
- break;
- packet0 = packet;
- }
-
- if (packet.stream_index == audio_stream_index) {
- got_frame = 0;
- ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
- continue;
- }
- packet.size -= ret;
- packet.data += ret;
-
- if (got_frame) {
- /* push the audio data from decoded frame into the filtergraph */
- if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
- break;
- }
-
- /* pull filtered audio from the filtergraph */
- while (1) {
- ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
- if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
- break;
- if (ret < 0)
- goto end;
- print_frame(filt_frame);
- av_frame_unref(filt_frame);
- }
- }
-
- if (packet.size <= 0)
- av_free_packet(&packet0);
- } else {
- /* discard non-wanted packets */
- av_free_packet(&packet0);
- }
- }
-end:
- avfilter_graph_free(&filter_graph);
- avcodec_close(dec_ctx);
- avformat_close_input(&fmt_ctx);
- av_frame_free(&frame);
- av_frame_free(&filt_frame);
-
- if (ret < 0 && ret != AVERROR_EOF) {
- fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
- exit(1);
- }
-
- exit(0);
-}
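The audio filtering example above ends as soon as av_read_frame() reports EOF, so frames still buffered inside the decoder are dropped. A hedged sketch of how they could be drained before freeing the graph, using the same old-style API as the example (flush_audio_decoder() is an illustrative name):

/* Sketch only: flush the decoder with an empty packet, feeding any
 * remaining frames into the filtergraph as the main loop does. */
static int flush_audio_decoder(AVCodecContext *dec_ctx, AVFrame *frame,
                               AVFilterContext *src_ctx)
{
    AVPacket flush_pkt;
    int got_frame, ret;

    av_init_packet(&flush_pkt);
    flush_pkt.data = NULL; /* an empty packet asks the decoder to flush */
    flush_pkt.size = 0;

    do {
        got_frame = 0;
        ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &flush_pkt);
        if (ret < 0)
            return ret;
        if (got_frame &&
            (ret = av_buffersrc_add_frame_flags(src_ctx, frame, 0)) < 0)
            return ret;
    } while (got_frame);

    return 0;
}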
diff --git a/Externals/ffmpeg/shared/doc/examples/filtering_video.c b/Externals/ffmpeg/shared/doc/examples/filtering_video.c
deleted file mode 100644
index 601c3d87f0..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/filtering_video.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2010 Nicolas George
- * Copyright (c) 2011 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * API example for decoding and filtering
- * @example filtering_video.c
- */
-
-#define _XOPEN_SOURCE 600 /* for usleep */
-#include <unistd.h>
-
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavfilter/avfiltergraph.h>
-#include <libavfilter/buffersink.h>
-#include <libavfilter/buffersrc.h>
-#include <libavutil/opt.h>
-
-const char *filter_descr = "scale=78:24";
-
-static AVFormatContext *fmt_ctx;
-static AVCodecContext *dec_ctx;
-AVFilterContext *buffersink_ctx;
-AVFilterContext *buffersrc_ctx;
-AVFilterGraph *filter_graph;
-static int video_stream_index = -1;
-static int64_t last_pts = AV_NOPTS_VALUE;
-
-static int open_input_file(const char *filename)
-{
- int ret;
- AVCodec *dec;
-
- if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
- return ret;
- }
-
- if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
- return ret;
- }
-
- /* select the video stream */
- ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
- return ret;
- }
- video_stream_index = ret;
- dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
- av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
-
- /* init the video decoder */
- if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
- return ret;
- }
-
- return 0;
-}
-
-static int init_filters(const char *filters_descr)
-{
- char args[512];
- int ret = 0;
- AVFilter *buffersrc = avfilter_get_by_name("buffer");
- AVFilter *buffersink = avfilter_get_by_name("buffersink");
- AVFilterInOut *outputs = avfilter_inout_alloc();
- AVFilterInOut *inputs = avfilter_inout_alloc();
- AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
- enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
-
- filter_graph = avfilter_graph_alloc();
- if (!outputs || !inputs || !filter_graph) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- /* buffer video source: the decoded frames from the decoder will be inserted here. */
- snprintf(args, sizeof(args),
- "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
- dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
- time_base.num, time_base.den,
- dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
-
- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
- args, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
- goto end;
- }
-
- /* buffer video sink: to terminate the filter chain. */
- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
- NULL, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
- goto end;
- }
-
- ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
- AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
- goto end;
- }
-
- /* Endpoints for the filter graph. */
- outputs->name = av_strdup("in");
- outputs->filter_ctx = buffersrc_ctx;
- outputs->pad_idx = 0;
- outputs->next = NULL;
-
- inputs->name = av_strdup("out");
- inputs->filter_ctx = buffersink_ctx;
- inputs->pad_idx = 0;
- inputs->next = NULL;
-
- if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
- &inputs, &outputs, NULL)) < 0)
- goto end;
-
- if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
- goto end;
-
-end:
- avfilter_inout_free(&inputs);
- avfilter_inout_free(&outputs);
-
- return ret;
-}
-
-static void display_frame(const AVFrame *frame, AVRational time_base)
-{
- int x, y;
- uint8_t *p0, *p;
- int64_t delay;
-
- if (frame->pts != AV_NOPTS_VALUE) {
- if (last_pts != AV_NOPTS_VALUE) {
- /* sleep roughly the right amount of time;
- * usleep is in microseconds, just like AV_TIME_BASE. */
- delay = av_rescale_q(frame->pts - last_pts,
- time_base, AV_TIME_BASE_Q);
- if (delay > 0 && delay < 1000000)
- usleep(delay);
- }
- last_pts = frame->pts;
- }
-
- /* Trivial ASCII grayscale display. */
- p0 = frame->data[0];
- puts("\033c");
- for (y = 0; y < frame->height; y++) {
- p = p0;
- for (x = 0; x < frame->width; x++)
- putchar(" .-+#"[*(p++) / 52]);
- putchar('\n');
- p0 += frame->linesize[0];
- }
- fflush(stdout);
-}
-
-int main(int argc, char **argv)
-{
- int ret;
- AVPacket packet;
- AVFrame *frame = av_frame_alloc();
- AVFrame *filt_frame = av_frame_alloc();
- int got_frame;
-
- if (!frame || !filt_frame) {
- perror("Could not allocate frame");
- exit(1);
- }
- if (argc != 2) {
- fprintf(stderr, "Usage: %s file\n", argv[0]);
- exit(1);
- }
-
- av_register_all();
- avfilter_register_all();
-
- if ((ret = open_input_file(argv[1])) < 0)
- goto end;
- if ((ret = init_filters(filter_descr)) < 0)
- goto end;
-
- /* read all packets */
- while (1) {
- if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
- break;
-
- if (packet.stream_index == video_stream_index) {
- got_frame = 0;
- ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
- break;
- }
-
- if (got_frame) {
- frame->pts = av_frame_get_best_effort_timestamp(frame);
-
- /* push the decoded frame into the filtergraph */
- if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
- break;
- }
-
- /* pull filtered frames from the filtergraph */
- while (1) {
- ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
- if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
- break;
- if (ret < 0)
- goto end;
- display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
- av_frame_unref(filt_frame);
- }
- av_frame_unref(frame);
- }
- }
- av_free_packet(&packet);
- }
-end:
- avfilter_graph_free(&filter_graph);
- avcodec_close(dec_ctx);
- avformat_close_input(&fmt_ctx);
- av_frame_free(&frame);
- av_frame_free(&filt_frame);
-
- if (ret < 0 && ret != AVERROR_EOF) {
- fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
- exit(1);
- }
-
- exit(0);
-}
diff --git a/Externals/ffmpeg/shared/doc/examples/metadata.c b/Externals/ffmpeg/shared/doc/examples/metadata.c
deleted file mode 100644
index f73c267369..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/metadata.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2011 Reinhard Tartler
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * Shows how the metadata API can be used in application programs.
- * @example metadata.c
- */
-
-#include <stdio.h>
-
-#include <libavformat/avformat.h>
-#include <libavutil/dict.h>
-
-int main (int argc, char **argv)
-{
- AVFormatContext *fmt_ctx = NULL;
- AVDictionaryEntry *tag = NULL;
- int ret;
-
- if (argc != 2) {
- printf("usage: %s <input_file>\n"
- "example program to demonstrate the use of the libavformat metadata API.\n"
- "\n", argv[0]);
- return 1;
- }
-
- av_register_all();
- if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
- return ret;
-
- while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
- printf("%s=%s\n", tag->key, tag->value);
-
- avformat_close_input(&fmt_ctx);
- return 0;
-}
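The metadata example above only walks the container-level dictionary. As a hedged companion sketch (same headers as the example; dump_stream_metadata() is an illustrative name), each AVStream carries its own metadata dictionary that can be walked with the same av_dict_get() loop:

/* Sketch only: print per-stream metadata in addition to the container tags. */
static void dump_stream_metadata(const AVFormatContext *fmt_ctx)
{
    unsigned int i;

    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        const AVDictionaryEntry *tag = NULL;
        /* Same iteration as in main(), applied to one stream at a time. */
        while ((tag = av_dict_get(fmt_ctx->streams[i]->metadata, "", tag,
                                  AV_DICT_IGNORE_SUFFIX)))
            printf("stream %u: %s=%s\n", i, tag->key, tag->value);
    }
}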
diff --git a/Externals/ffmpeg/shared/doc/examples/muxing.c b/Externals/ffmpeg/shared/doc/examples/muxing.c
deleted file mode 100644
index 8b0ea60bb3..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/muxing.c
+++ /dev/null
@@ -1,670 +0,0 @@
-/*
- * Copyright (c) 2003 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * libavformat API example.
- *
- * Output a media file in any supported libavformat format. The default
- * codecs are used.
- * @example muxing.c
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <math.h>
-
-#include <libavutil/avassert.h>
-#include <libavutil/channel_layout.h>
-#include <libavutil/opt.h>
-#include <libavutil/mathematics.h>
-#include <libavutil/timestamp.h>
-#include <libavformat/avformat.h>
-#include <libswscale/swscale.h>
-#include <libswresample/swresample.h>
-
-#define STREAM_DURATION 10.0
-#define STREAM_FRAME_RATE 25 /* 25 images/s */
-#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
-
-#define SCALE_FLAGS SWS_BICUBIC
-
-// a wrapper around a single output AVStream
-typedef struct OutputStream {
- AVStream *st;
-
- /* pts of the next frame that will be generated */
- int64_t next_pts;
- int samples_count;
-
- AVFrame *frame;
- AVFrame *tmp_frame;
-
- float t, tincr, tincr2;
-
- struct SwsContext *sws_ctx;
- struct SwrContext *swr_ctx;
-} OutputStream;
-
-static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
-{
- AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
-
- printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
- av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
- av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
- av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
- pkt->stream_index);
-}
-
-static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
-{
- /* rescale output packet timestamp values from codec to stream timebase */
- av_packet_rescale_ts(pkt, *time_base, st->time_base);
- pkt->stream_index = st->index;
-
- /* Write the compressed frame to the media file. */
- log_packet(fmt_ctx, pkt);
- return av_interleaved_write_frame(fmt_ctx, pkt);
-}
-
-/* Add an output stream. */
-static void add_stream(OutputStream *ost, AVFormatContext *oc,
- AVCodec **codec,
- enum AVCodecID codec_id)
-{
- AVCodecContext *c;
- int i;
-
- /* find the encoder */
- *codec = avcodec_find_encoder(codec_id);
- if (!(*codec)) {
- fprintf(stderr, "Could not find encoder for '%s'\n",
- avcodec_get_name(codec_id));
- exit(1);
- }
-
- ost->st = avformat_new_stream(oc, *codec);
- if (!ost->st) {
- fprintf(stderr, "Could not allocate stream\n");
- exit(1);
- }
- ost->st->id = oc->nb_streams-1;
- c = ost->st->codec;
-
- switch ((*codec)->type) {
- case AVMEDIA_TYPE_AUDIO:
- c->sample_fmt = (*codec)->sample_fmts ?
- (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
- c->bit_rate = 64000;
- c->sample_rate = 44100;
- if ((*codec)->supported_samplerates) {
- c->sample_rate = (*codec)->supported_samplerates[0];
- for (i = 0; (*codec)->supported_samplerates[i]; i++) {
- if ((*codec)->supported_samplerates[i] == 44100)
- c->sample_rate = 44100;
- }
- }
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
- c->channel_layout = AV_CH_LAYOUT_STEREO;
- if ((*codec)->channel_layouts) {
- c->channel_layout = (*codec)->channel_layouts[0];
- for (i = 0; (*codec)->channel_layouts[i]; i++) {
- if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
- c->channel_layout = AV_CH_LAYOUT_STEREO;
- }
- }
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
- ost->st->time_base = (AVRational){ 1, c->sample_rate };
- break;
-
- case AVMEDIA_TYPE_VIDEO:
- c->codec_id = codec_id;
-
- c->bit_rate = 400000;
- /* Resolution must be a multiple of two. */
- c->width = 352;
- c->height = 288;
- /* timebase: This is the fundamental unit of time (in seconds) in terms
- * of which frame timestamps are represented. For fixed-fps content,
- * timebase should be 1/framerate and timestamp increments should be
- * identical to 1. */
- ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
- c->time_base = ost->st->time_base;
-
- c->gop_size = 12; /* emit one intra frame every twelve frames at most */
- c->pix_fmt = STREAM_PIX_FMT;
- if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- /* just for testing, we also add B frames */
- c->max_b_frames = 2;
- }
- if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
- /* Needed to avoid using macroblocks in which some coeffs overflow.
- * This does not happen with normal video, it just happens here as
- * the motion of the chroma plane does not match the luma plane. */
- c->mb_decision = 2;
- }
- break;
-
- default:
- break;
- }
-
- /* Some formats want stream headers to be separate. */
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-}
-
-/**************************************************************/
-/* audio output */
-
-static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
- uint64_t channel_layout,
- int sample_rate, int nb_samples)
-{
- AVFrame *frame = av_frame_alloc();
- int ret;
-
- if (!frame) {
- fprintf(stderr, "Error allocating an audio frame\n");
- exit(1);
- }
-
- frame->format = sample_fmt;
- frame->channel_layout = channel_layout;
- frame->sample_rate = sample_rate;
- frame->nb_samples = nb_samples;
-
- if (nb_samples) {
- ret = av_frame_get_buffer(frame, 0);
- if (ret < 0) {
- fprintf(stderr, "Error allocating an audio buffer\n");
- exit(1);
- }
- }
-
- return frame;
-}
-
-static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
-{
- AVCodecContext *c;
- int nb_samples;
- int ret;
- AVDictionary *opt = NULL;
-
- c = ost->st->codec;
-
- /* open it */
- av_dict_copy(&opt, opt_arg, 0);
- ret = avcodec_open2(c, codec, &opt);
- av_dict_free(&opt);
- if (ret < 0) {
- fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- /* init signal generator */
- ost->t = 0;
- ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
- /* increment frequency by 110 Hz per second */
- ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
-
- if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
- nb_samples = 10000;
- else
- nb_samples = c->frame_size;
-
- ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
- c->sample_rate, nb_samples);
- ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
- c->sample_rate, nb_samples);
-
- /* create resampler context */
- ost->swr_ctx = swr_alloc();
- if (!ost->swr_ctx) {
- fprintf(stderr, "Could not allocate resampler context\n");
- exit(1);
- }
-
- /* set options */
- av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
- av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
- av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
- av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
- av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
-
- /* initialize the resampling context */
- if ((ret = swr_init(ost->swr_ctx)) < 0) {
- fprintf(stderr, "Failed to initialize the resampling context\n");
- exit(1);
- }
-}
-
-/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
- * 'nb_channels' channels. */
-static AVFrame *get_audio_frame(OutputStream *ost)
-{
- AVFrame *frame = ost->tmp_frame;
- int j, i, v;
- int16_t *q = (int16_t*)frame->data[0];
-
- /* check if we want to generate more frames */
- if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
- STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
- return NULL;
-
- for (j = 0; j < frame->nb_samples; j++) {
- v = (int)(sin(ost->t) * 10000);
- for (i = 0; i < ost->st->codec->channels; i++)
- *q++ = v;
- ost->t += ost->tincr;
- ost->tincr += ost->tincr2;
- }
-
- frame->pts = ost->next_pts;
- ost->next_pts += frame->nb_samples;
-
- return frame;
-}
-
-/*
- * encode one audio frame and send it to the muxer
- * return 1 when encoding is finished, 0 otherwise
- */
-static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
-{
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame;
- int ret;
- int got_packet;
- int dst_nb_samples;
-
- av_init_packet(&pkt);
- c = ost->st->codec;
-
- frame = get_audio_frame(ost);
-
- if (frame) {
- /* convert samples from native format to destination codec format, using the resampler */
- /* compute destination number of samples */
- dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
- c->sample_rate, c->sample_rate, AV_ROUND_UP);
- av_assert0(dst_nb_samples == frame->nb_samples);
-
- /* when we pass a frame to the encoder, it may keep a reference to it
- * internally;
- * make sure we do not overwrite it here
- */
- ret = av_frame_make_writable(ost->frame);
- if (ret < 0)
- exit(1);
-
- /* convert to destination format */
- ret = swr_convert(ost->swr_ctx,
- ost->frame->data, dst_nb_samples,
- (const uint8_t **)frame->data, frame->nb_samples);
- if (ret < 0) {
- fprintf(stderr, "Error while converting\n");
- exit(1);
- }
- frame = ost->frame;
-
- frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
- ost->samples_count += dst_nb_samples;
- }
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (ret < 0) {
- fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (got_packet) {
- ret = write_frame(oc, &c->time_base, ost->st, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error while writing audio frame: %s\n",
- av_err2str(ret));
- exit(1);
- }
- }
-
- return (frame || got_packet) ? 0 : 1;
-}
-
-/**************************************************************/
-/* video output */
-
-static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
-{
- AVFrame *picture;
- int ret;
-
- picture = av_frame_alloc();
- if (!picture)
- return NULL;
-
- picture->format = pix_fmt;
- picture->width = width;
- picture->height = height;
-
- /* allocate the buffers for the frame data */
- ret = av_frame_get_buffer(picture, 32);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate frame data.\n");
- exit(1);
- }
-
- return picture;
-}
-
-static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
-{
- int ret;
- AVCodecContext *c = ost->st->codec;
- AVDictionary *opt = NULL;
-
- av_dict_copy(&opt, opt_arg, 0);
-
- /* open the codec */
- ret = avcodec_open2(c, codec, &opt);
- av_dict_free(&opt);
- if (ret < 0) {
- fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- /* allocate and init a re-usable frame */
- ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
- if (!ost->frame) {
- fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
-
- /* If the output format is not YUV420P, then a temporary YUV420P
- * picture is needed too. It is then converted to the required
- * output format. */
- ost->tmp_frame = NULL;
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
- if (!ost->tmp_frame) {
- fprintf(stderr, "Could not allocate temporary picture\n");
- exit(1);
- }
- }
-}
-
-/* Prepare a dummy image. */
-static void fill_yuv_image(AVFrame *pict, int frame_index,
- int width, int height)
-{
- int x, y, i, ret;
-
- /* when we pass a frame to the encoder, it may keep a reference to it
- * internally;
- * make sure we do not overwrite it here
- */
- ret = av_frame_make_writable(pict);
- if (ret < 0)
- exit(1);
-
- i = frame_index;
-
- /* Y */
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
-
- /* Cb and Cr */
- for (y = 0; y < height / 2; y++) {
- for (x = 0; x < width / 2; x++) {
- pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
- pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
- }
- }
-}
-
-static AVFrame *get_video_frame(OutputStream *ost)
-{
- AVCodecContext *c = ost->st->codec;
-
- /* check if we want to generate more frames */
- if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
- STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
- return NULL;
-
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- /* as we only generate a YUV420P picture, we must convert it
- * to the codec pixel format if needed */
- if (!ost->sws_ctx) {
- ost->sws_ctx = sws_getContext(c->width, c->height,
- AV_PIX_FMT_YUV420P,
- c->width, c->height,
- c->pix_fmt,
- SCALE_FLAGS, NULL, NULL, NULL);
- if (!ost->sws_ctx) {
- fprintf(stderr,
- "Could not initialize the conversion context\n");
- exit(1);
- }
- }
- fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
- sws_scale(ost->sws_ctx,
- (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
- 0, c->height, ost->frame->data, ost->frame->linesize);
- } else {
- fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
- }
-
- ost->frame->pts = ost->next_pts++;
-
- return ost->frame;
-}
-
-/*
- * encode one video frame and send it to the muxer
- * return 1 when encoding is finished, 0 otherwise
- */
-static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
-{
- int ret;
- AVCodecContext *c;
- AVFrame *frame;
- int got_packet = 0;
-
- c = ost->st->codec;
-
- frame = get_video_frame(ost);
-
- if (oc->oformat->flags & AVFMT_RAWPICTURE) {
- /* a hack to avoid data copy with some raw video muxers */
- AVPacket pkt;
- av_init_packet(&pkt);
-
- if (!frame)
- return 1;
-
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = ost->st->index;
- pkt.data = (uint8_t *)frame;
- pkt.size = sizeof(AVPicture);
-
- pkt.pts = pkt.dts = frame->pts;
- av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
-
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- AVPacket pkt = { 0 };
- av_init_packet(&pkt);
-
- /* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
- if (ret < 0) {
- fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (got_packet) {
- ret = write_frame(oc, &c->time_base, ost->st, &pkt);
- } else {
- ret = 0;
- }
- }
-
- if (ret < 0) {
- fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- return (frame || got_packet) ? 0 : 1;
-}
-
-static void close_stream(AVFormatContext *oc, OutputStream *ost)
-{
- avcodec_close(ost->st->codec);
- av_frame_free(&ost->frame);
- av_frame_free(&ost->tmp_frame);
- sws_freeContext(ost->sws_ctx);
- swr_free(&ost->swr_ctx);
-}
-
-/**************************************************************/
-/* media file output */
-
-int main(int argc, char **argv)
-{
- OutputStream video_st = { 0 }, audio_st = { 0 };
- const char *filename;
- AVOutputFormat *fmt;
- AVFormatContext *oc;
- AVCodec *audio_codec, *video_codec;
- int ret;
- int have_video = 0, have_audio = 0;
- int encode_video = 0, encode_audio = 0;
- AVDictionary *opt = NULL;
-
- /* Initialize libavcodec, and register all codecs and formats. */
- av_register_all();
-
- if (argc < 2) {
- printf("usage: %s output_file\n"
- "API example program to output a media file with libavformat.\n"
- "This program generates a synthetic audio and video stream, encodes and\n"
- "muxes them into a file named output_file.\n"
- "The output format is automatically guessed according to the file extension.\n"
- "Raw images can also be output by using '%%d' in the filename.\n"
- "\n", argv[0]);
- return 1;
- }
-
- filename = argv[1];
- if (argc > 3 && !strcmp(argv[2], "-flags")) {
- av_dict_set(&opt, argv[2]+1, argv[3], 0);
- }
-
- /* allocate the output media context */
- avformat_alloc_output_context2(&oc, NULL, NULL, filename);
- if (!oc) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
- }
- if (!oc)
- return 1;
-
- fmt = oc->oformat;
-
- /* Add the audio and video streams using the default format codecs
- * and initialize the codecs. */
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- add_stream(&video_st, oc, &video_codec, fmt->video_codec);
- have_video = 1;
- encode_video = 1;
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
- have_audio = 1;
- encode_audio = 1;
- }
-
- /* Now that all the parameters are set, we can open the audio and
- * video codecs and allocate the necessary encode buffers. */
- if (have_video)
- open_video(oc, video_codec, &video_st, opt);
-
- if (have_audio)
- open_audio(oc, audio_codec, &audio_st, opt);
-
- av_dump_format(oc, 0, filename, 1);
-
- /* open the output file, if needed */
- if (!(fmt->flags & AVFMT_NOFILE)) {
- ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
- if (ret < 0) {
- fprintf(stderr, "Could not open '%s': %s\n", filename,
- av_err2str(ret));
- return 1;
- }
- }
-
- /* Write the stream header, if any. */
- ret = avformat_write_header(oc, &opt);
- if (ret < 0) {
- fprintf(stderr, "Error occurred when opening output file: %s\n",
- av_err2str(ret));
- return 1;
- }
-
- while (encode_video || encode_audio) {
- /* select the stream to encode */
- if (encode_video &&
- (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
- audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
- encode_video = !write_video_frame(oc, &video_st);
- } else {
- encode_audio = !write_audio_frame(oc, &audio_st);
- }
- }
-
- /* Write the trailer, if any. The trailer must be written before you
- * close the CodecContexts open when you wrote the header; otherwise
- * av_write_trailer() may try to use memory that was freed on
- * av_codec_close(). */
- av_write_trailer(oc);
-
- /* Close each codec. */
- if (have_video)
- close_stream(oc, &video_st);
- if (have_audio)
- close_stream(oc, &audio_st);
-
- if (!(fmt->flags & AVFMT_NOFILE))
- /* Close the output file. */
- avio_closep(&oc->pb);
-
- /* free the stream */
- avformat_free_context(oc);
-
- return 0;
-}
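The muxing example above encodes through the old avcodec_encode_video2()/avcodec_encode_audio2() calls. For reference only, a hedged sketch of the send/receive counterpart available in later FFmpeg releases (3.1+, newer than the snapshot removed here); encode_with_new_api() and on_packet() are illustrative names, not part of the deleted file.

/* Sketch only: assumes FFmpeg >= 3.1 with avcodec_send_frame() and
 * avcodec_receive_packet(). */
#include <libavcodec/avcodec.h>

static int encode_with_new_api(AVCodecContext *enc_ctx, const AVFrame *frame,
                               int (*on_packet)(AVPacket *))
{
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* Feed one frame to the encoder; a NULL frame starts flushing. */
    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0)
        return ret;

    /* Collect every packet the encoder can produce for this input. */
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* needs more input, or fully flushed */
        if (ret < 0)
            return ret; /* encoding error */
        ret = on_packet(&pkt); /* e.g. rescale timestamps and write */
        av_packet_unref(&pkt);
    }
    return ret;
}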
diff --git a/Externals/ffmpeg/shared/doc/examples/remuxing.c b/Externals/ffmpeg/shared/doc/examples/remuxing.c
deleted file mode 100644
index e9758a8dcb..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/remuxing.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (c) 2013 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * libavformat/libavcodec demuxing and muxing API example.
- *
- * Remux streams from one container format to another.
- * @example remuxing.c
- */
-
-#include <libavutil/timestamp.h>
-#include <libavformat/avformat.h>
-
-static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
-{
- AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
-
- printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
- tag,
- av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
- av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
- av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
- pkt->stream_index);
-}
-
-int main(int argc, char **argv)
-{
- AVOutputFormat *ofmt = NULL;
- AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
- AVPacket pkt;
- const char *in_filename, *out_filename;
- int ret, i;
-
- if (argc < 3) {
- printf("usage: %s input output\n"
- "API example program to remux a media file with libavformat and libavcodec.\n"
- "The output format is guessed according to the file extension.\n"
- "\n", argv[0]);
- return 1;
- }
-
- in_filename = argv[1];
- out_filename = argv[2];
-
- av_register_all();
-
- if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
- fprintf(stderr, "Could not open input file '%s'", in_filename);
- goto end;
- }
-
- if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
- fprintf(stderr, "Failed to retrieve input stream information");
- goto end;
- }
-
- av_dump_format(ifmt_ctx, 0, in_filename, 0);
-
- avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
- if (!ofmt_ctx) {
- fprintf(stderr, "Could not create output context\n");
- ret = AVERROR_UNKNOWN;
- goto end;
- }
-
- ofmt = ofmt_ctx->oformat;
-
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- AVStream *in_stream = ifmt_ctx->streams[i];
- AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
- if (!out_stream) {
- fprintf(stderr, "Failed allocating output stream\n");
- ret = AVERROR_UNKNOWN;
- goto end;
- }
-
- ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
- if (ret < 0) {
- fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
- goto end;
- }
- out_stream->codec->codec_tag = 0;
- if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
- out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
- }
- av_dump_format(ofmt_ctx, 0, out_filename, 1);
-
- if (!(ofmt->flags & AVFMT_NOFILE)) {
- ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
- if (ret < 0) {
- fprintf(stderr, "Could not open output file '%s'", out_filename);
- goto end;
- }
- }
-
- ret = avformat_write_header(ofmt_ctx, NULL);
- if (ret < 0) {
- fprintf(stderr, "Error occurred when opening output file\n");
- goto end;
- }
-
- while (1) {
- AVStream *in_stream, *out_stream;
-
- ret = av_read_frame(ifmt_ctx, &pkt);
- if (ret < 0)
- break;
-
- in_stream = ifmt_ctx->streams[pkt.stream_index];
- out_stream = ofmt_ctx->streams[pkt.stream_index];
-
- log_packet(ifmt_ctx, &pkt, "in");
-
- /* copy packet */
- pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
- pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
- pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
- pkt.pos = -1;
- log_packet(ofmt_ctx, &pkt, "out");
-
- ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error muxing packet\n");
- break;
- }
- av_free_packet(&pkt);
- }
-
- av_write_trailer(ofmt_ctx);
-end:
-
- avformat_close_input(&ifmt_ctx);
-
- /* close output */
- if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
- avio_closep(&ofmt_ctx->pb);
- avformat_free_context(ofmt_ctx);
-
- if (ret < 0 && ret != AVERROR_EOF) {
- fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
- return 1;
- }
-
- return 0;
-}
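The only per-packet work in the remuxing loop is rebasing the timestamps from the input stream's time base to the output stream's. That step pulled out into a helper for clarity (a sketch; the helper name is not part of the example):

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

/* Rebase a packet's timing fields before handing it to the output muxer.
 * AV_ROUND_PASS_MINMAX leaves AV_NOPTS_VALUE timestamps untouched. */
static void rescale_packet(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_tb, out_tb,
                                AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_tb, out_tb,
                                AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    pkt->duration = av_rescale_q(pkt->duration, in_tb, out_tb);
    pkt->pos = -1; /* byte offset is meaningless in the new container */
}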
diff --git a/Externals/ffmpeg/shared/doc/examples/resampling_audio.c b/Externals/ffmpeg/shared/doc/examples/resampling_audio.c
deleted file mode 100644
index f35e7e1779..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/resampling_audio.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2012 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @example resampling_audio.c
- * libswresample API use example.
- */
-
-#include <libavutil/opt.h>
-#include <libavutil/channel_layout.h>
-#include <libavutil/samplefmt.h>
-#include <libswresample/swresample.h>
-
-static int get_format_from_sample_fmt(const char **fmt,
- enum AVSampleFormat sample_fmt)
-{
- int i;
- struct sample_fmt_entry {
- enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
- } sample_fmt_entries[] = {
- { AV_SAMPLE_FMT_U8, "u8", "u8" },
- { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
- { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
- { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
- { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
- };
- *fmt = NULL;
-
- for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
- struct sample_fmt_entry *entry = &sample_fmt_entries[i];
- if (sample_fmt == entry->sample_fmt) {
- *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
- return 0;
- }
- }
-
- fprintf(stderr,
- "Sample format %s not supported as output format\n",
- av_get_sample_fmt_name(sample_fmt));
- return AVERROR(EINVAL);
-}
-
-/**
- * Fill dst buffer with nb_samples, generated starting from t.
- */
-static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
-{
- int i, j;
- double tincr = 1.0 / sample_rate, *dstp = dst;
- const double c = 2 * M_PI * 440.0;
-
- /* generate a sine tone with 440Hz frequency and duplicated channels */
- for (i = 0; i < nb_samples; i++) {
- *dstp = sin(c * *t);
- for (j = 1; j < nb_channels; j++)
- dstp[j] = dstp[0];
- dstp += nb_channels;
- *t += tincr;
- }
-}
-
-int main(int argc, char **argv)
-{
- int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
- int src_rate = 48000, dst_rate = 44100;
- uint8_t **src_data = NULL, **dst_data = NULL;
- int src_nb_channels = 0, dst_nb_channels = 0;
- int src_linesize, dst_linesize;
- int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
- enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
- const char *dst_filename = NULL;
- FILE *dst_file;
- int dst_bufsize;
- const char *fmt;
- struct SwrContext *swr_ctx;
- double t;
- int ret;
-
- if (argc != 2) {
- fprintf(stderr, "Usage: %s output_file\n"
- "API example program to show how to resample an audio stream with libswresample.\n"
- "This program generates a series of audio frames, resamples them to a specified "
- "output format and rate and saves them to an output file named output_file.\n",
- argv[0]);
- exit(1);
- }
- dst_filename = argv[1];
-
- dst_file = fopen(dst_filename, "wb");
- if (!dst_file) {
- fprintf(stderr, "Could not open destination file %s\n", dst_filename);
- exit(1);
- }
-
- /* create resampler context */
- swr_ctx = swr_alloc();
- if (!swr_ctx) {
- fprintf(stderr, "Could not allocate resampler context\n");
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- /* set options */
- av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
- av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
- av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
-
- av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
- av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
- av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
-
- /* initialize the resampling context */
- if ((ret = swr_init(swr_ctx)) < 0) {
- fprintf(stderr, "Failed to initialize the resampling context\n");
- goto end;
- }
-
- /* allocate source and destination samples buffers */
-
- src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
- ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
- src_nb_samples, src_sample_fmt, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate source samples\n");
- goto end;
- }
-
- /* compute the number of converted samples: buffering is avoided
- * by ensuring that the output buffer will contain at least all the
- * converted input samples */
- max_dst_nb_samples = dst_nb_samples =
- av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
-
- /* buffer is going to be directly written to a rawaudio file, no alignment */
- dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
- ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
- dst_nb_samples, dst_sample_fmt, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate destination samples\n");
- goto end;
- }
-
- t = 0;
- do {
- /* generate synthetic audio */
- fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
-
- /* compute destination number of samples */
- dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
- src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
- if (dst_nb_samples > max_dst_nb_samples) {
- av_freep(&dst_data[0]);
- ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
- dst_nb_samples, dst_sample_fmt, 1);
- if (ret < 0)
- break;
- max_dst_nb_samples = dst_nb_samples;
- }
-
- /* convert to destination format */
- ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
- if (ret < 0) {
- fprintf(stderr, "Error while converting\n");
- goto end;
- }
- dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
- ret, dst_sample_fmt, 1);
- if (dst_bufsize < 0) {
- fprintf(stderr, "Could not get sample buffer size\n");
- goto end;
- }
- printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
- fwrite(dst_data[0], 1, dst_bufsize, dst_file);
- } while (t < 10);
-
- if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
- goto end;
- fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
- "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
- fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
-
-end:
- fclose(dst_file);
-
- if (src_data)
- av_freep(&src_data[0]);
- av_freep(&src_data);
-
- if (dst_data)
- av_freep(&dst_data[0]);
- av_freep(&dst_data);
-
- swr_free(&swr_ctx);
- return ret < 0;
-}
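The size of the destination buffer in the loop above comes from a single computation: samples still queued inside the resampler plus the new input, rescaled to the output rate and rounded up so swr_convert() never runs out of room. As a stand-alone sketch:

#include <libavutil/mathematics.h>
#include <libswresample/swresample.h>

/* Upper bound on the samples one swr_convert() call can produce for
 * src_nb_samples new input samples at src_rate, converted to dst_rate. */
static int64_t max_output_samples(struct SwrContext *swr, int src_nb_samples,
                                  int src_rate, int dst_rate)
{
    return av_rescale_rnd(swr_get_delay(swr, src_rate) + src_nb_samples,
                          dst_rate, src_rate, AV_ROUND_UP);
}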
diff --git a/Externals/ffmpeg/shared/doc/examples/scaling_video.c b/Externals/ffmpeg/shared/doc/examples/scaling_video.c
deleted file mode 100644
index 587f3abe4f..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/scaling_video.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2012 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * libswscale API use example.
- * @example scaling_video.c
- */
-
-#include <libavutil/imgutils.h>
-#include <libavutil/parseutils.h>
-#include <libswscale/swscale.h>
-
-static void fill_yuv_image(uint8_t *data[4], int linesize[4],
- int width, int height, int frame_index)
-{
- int x, y;
-
- /* Y */
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- data[0][y * linesize[0] + x] = x + y + frame_index * 3;
-
- /* Cb and Cr */
- for (y = 0; y < height / 2; y++) {
- for (x = 0; x < width / 2; x++) {
- data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
- data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
- }
- }
-}
-
-int main(int argc, char **argv)
-{
- uint8_t *src_data[4], *dst_data[4];
- int src_linesize[4], dst_linesize[4];
- int src_w = 320, src_h = 240, dst_w, dst_h;
- enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
- const char *dst_size = NULL;
- const char *dst_filename = NULL;
- FILE *dst_file;
- int dst_bufsize;
- struct SwsContext *sws_ctx;
- int i, ret;
-
- if (argc != 3) {
- fprintf(stderr, "Usage: %s output_file output_size\n"
- "API example program to show how to scale an image with libswscale.\n"
- "This program generates a series of pictures, rescales them to the given "
- "output_size and saves them to an output file named output_file\n."
- "\n", argv[0]);
- exit(1);
- }
- dst_filename = argv[1];
- dst_size = argv[2];
-
- if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
- fprintf(stderr,
- "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
- dst_size);
- exit(1);
- }
-
- dst_file = fopen(dst_filename, "wb");
- if (!dst_file) {
- fprintf(stderr, "Could not open destination file %s\n", dst_filename);
- exit(1);
- }
-
- /* create scaling context */
- sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
- dst_w, dst_h, dst_pix_fmt,
- SWS_BILINEAR, NULL, NULL, NULL);
- if (!sws_ctx) {
- fprintf(stderr,
- "Impossible to create scale context for the conversion "
- "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
- av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
- av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
- ret = AVERROR(EINVAL);
- goto end;
- }
-
- /* allocate source and destination image buffers */
- if ((ret = av_image_alloc(src_data, src_linesize,
- src_w, src_h, src_pix_fmt, 16)) < 0) {
- fprintf(stderr, "Could not allocate source image\n");
- goto end;
- }
-
- /* buffer is going to be written to rawvideo file, no alignment */
- if ((ret = av_image_alloc(dst_data, dst_linesize,
- dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
- fprintf(stderr, "Could not allocate destination image\n");
- goto end;
- }
- dst_bufsize = ret;
-
- for (i = 0; i < 100; i++) {
- /* generate synthetic video */
- fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
-
- /* convert to destination format */
- sws_scale(sws_ctx, (const uint8_t * const*)src_data,
- src_linesize, 0, src_h, dst_data, dst_linesize);
-
- /* write scaled image to file */
- fwrite(dst_data[0], 1, dst_bufsize, dst_file);
- }
-
- fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
- "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
- av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
-
-end:
- fclose(dst_file);
- av_freep(&src_data[0]);
- av_freep(&dst_data[0]);
- sws_freeContext(sws_ctx);
- return ret < 0;
-}
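The byte count written per frame above is simply whatever av_image_alloc() returned for the destination buffer. When the buffer is managed elsewhere, the same number can be computed up front; a sketch for the RGB24, alignment-1 layout used here, assuming a libavutil recent enough to provide av_image_get_buffer_size():

#include <libavutil/imgutils.h>
#include <libavutil/pixfmt.h>

/* Byte size of one tightly packed RGB24 frame (alignment 1), matching the
 * layout written to the rawvideo output file. */
static int rgb24_frame_size(int width, int height)
{
    return av_image_get_buffer_size(AV_PIX_FMT_RGB24, width, height, 1);
}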
diff --git a/Externals/ffmpeg/shared/doc/examples/transcode_aac.c b/Externals/ffmpeg/shared/doc/examples/transcode_aac.c
deleted file mode 100644
index 6998aac72b..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/transcode_aac.c
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * simple audio converter
- *
- * @example transcode_aac.c
- * Convert an input audio file to AAC in an MP4 container using FFmpeg.
- * @author Andreas Unterweger (dustsigns@gmail.com)
- */
-
-#include <stdio.h>
-
-#include "libavformat/avformat.h"
-#include "libavformat/avio.h"
-
-#include "libavcodec/avcodec.h"
-
-#include "libavutil/audio_fifo.h"
-#include "libavutil/avassert.h"
-#include "libavutil/avstring.h"
-#include "libavutil/frame.h"
-#include "libavutil/opt.h"
-
-#include "libswresample/swresample.h"
-
-/** The output bit rate in kbit/s */
-#define OUTPUT_BIT_RATE 48000
-/** The number of output channels */
-#define OUTPUT_CHANNELS 2
-/** The audio sample output format */
-#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
-
-/**
- * Convert an error code into a text message.
- * @param error Error code to be converted
- * @return Corresponding error text (not thread-safe)
- */
-static const char *get_error_text(const int error)
-{
- static char error_buffer[255];
- av_strerror(error, error_buffer, sizeof(error_buffer));
- return error_buffer;
-}
-
-/** Open an input file and the required decoder. */
-static int open_input_file(const char *filename,
- AVFormatContext **input_format_context,
- AVCodecContext **input_codec_context)
-{
- AVCodec *input_codec;
- int error;
-
- /** Open the input file to read from it. */
- if ((error = avformat_open_input(input_format_context, filename, NULL,
- NULL)) < 0) {
- fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
- filename, get_error_text(error));
- *input_format_context = NULL;
- return error;
- }
-
- /** Get information on the input file (number of streams etc.). */
- if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
- fprintf(stderr, "Could not open find stream info (error '%s')\n",
- get_error_text(error));
- avformat_close_input(input_format_context);
- return error;
- }
-
- /** Make sure that there is only one stream in the input file. */
- if ((*input_format_context)->nb_streams != 1) {
- fprintf(stderr, "Expected one audio input stream, but found %d\n",
- (*input_format_context)->nb_streams);
- avformat_close_input(input_format_context);
- return AVERROR_EXIT;
- }
-
- /** Find a decoder for the audio stream. */
- if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
- fprintf(stderr, "Could not find input codec\n");
- avformat_close_input(input_format_context);
- return AVERROR_EXIT;
- }
-
- /** Open the decoder for the audio stream to use it later. */
- if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
- input_codec, NULL)) < 0) {
- fprintf(stderr, "Could not open input codec (error '%s')\n",
- get_error_text(error));
- avformat_close_input(input_format_context);
- return error;
- }
-
- /** Save the decoder context for easier access later. */
- *input_codec_context = (*input_format_context)->streams[0]->codec;
-
- return 0;
-}
-
-/**
- * Open an output file and the required encoder.
- * Also set some basic encoder parameters.
- * Some of these parameters are based on the input file's parameters.
- */
-static int open_output_file(const char *filename,
- AVCodecContext *input_codec_context,
- AVFormatContext **output_format_context,
- AVCodecContext **output_codec_context)
-{
- AVIOContext *output_io_context = NULL;
- AVStream *stream = NULL;
- AVCodec *output_codec = NULL;
- int error;
-
- /** Open the output file to write to it. */
- if ((error = avio_open(&output_io_context, filename,
- AVIO_FLAG_WRITE)) < 0) {
- fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
- filename, get_error_text(error));
- return error;
- }
-
- /** Create a new format context for the output container format. */
- if (!(*output_format_context = avformat_alloc_context())) {
- fprintf(stderr, "Could not allocate output format context\n");
- return AVERROR(ENOMEM);
- }
-
- /** Associate the output file (pointer) with the container format context. */
- (*output_format_context)->pb = output_io_context;
-
- /** Guess the desired container format based on the file extension. */
- if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
- NULL))) {
- fprintf(stderr, "Could not find output file format\n");
- goto cleanup;
- }
-
- av_strlcpy((*output_format_context)->filename, filename,
- sizeof((*output_format_context)->filename));
-
- /** Find the encoder to be used by its name. */
- if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
- fprintf(stderr, "Could not find an AAC encoder.\n");
- goto cleanup;
- }
-
- /** Create a new audio stream in the output file container. */
- if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
- fprintf(stderr, "Could not create new stream\n");
- error = AVERROR(ENOMEM);
- goto cleanup;
- }
-
- /** Save the encoder context for easier access later. */
- *output_codec_context = stream->codec;
-
- /**
- * Set the basic encoder parameters.
- * The input file's sample rate is used to avoid a sample rate conversion.
- */
- (*output_codec_context)->channels = OUTPUT_CHANNELS;
- (*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
- (*output_codec_context)->sample_rate = input_codec_context->sample_rate;
- (*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
- (*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
-
- /**
- * Some container formats (like MP4) require global headers to be present
- * Mark the encoder so that it behaves accordingly.
- */
- if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
- (*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- /** Open the encoder for the audio stream to use it later. */
- if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
- fprintf(stderr, "Could not open output codec (error '%s')\n",
- get_error_text(error));
- goto cleanup;
- }
-
- return 0;
-
-cleanup:
- avio_closep(&(*output_format_context)->pb);
- avformat_free_context(*output_format_context);
- *output_format_context = NULL;
- return error < 0 ? error : AVERROR_EXIT;
-}
-
-/** Initialize one data packet for reading or writing. */
-static void init_packet(AVPacket *packet)
-{
- av_init_packet(packet);
- /** Set the packet data and size so that it is recognized as being empty. */
- packet->data = NULL;
- packet->size = 0;
-}
-
-/** Initialize one audio frame for reading from the input file */
-static int init_input_frame(AVFrame **frame)
-{
- if (!(*frame = av_frame_alloc())) {
- fprintf(stderr, "Could not allocate input frame\n");
- return AVERROR(ENOMEM);
- }
- return 0;
-}
-
-/**
- * Initialize the audio resampler based on the input and output codec settings.
- * If the input and output sample formats differ, a conversion is required;
- * libswresample takes care of this, but requires initialization.
- */
-static int init_resampler(AVCodecContext *input_codec_context,
- AVCodecContext *output_codec_context,
- SwrContext **resample_context)
-{
- int error;
-
- /**
- * Create a resampler context for the conversion.
- * Set the conversion parameters.
- * Default channel layouts based on the number of channels
- * are assumed for simplicity (they are sometimes not detected
- * properly by the demuxer and/or decoder).
- */
- *resample_context = swr_alloc_set_opts(NULL,
- av_get_default_channel_layout(output_codec_context->channels),
- output_codec_context->sample_fmt,
- output_codec_context->sample_rate,
- av_get_default_channel_layout(input_codec_context->channels),
- input_codec_context->sample_fmt,
- input_codec_context->sample_rate,
- 0, NULL);
- if (!*resample_context) {
- fprintf(stderr, "Could not allocate resample context\n");
- return AVERROR(ENOMEM);
- }
- /**
- * Perform a sanity check so that the number of converted samples is
- * not greater than the number of samples to be converted.
- * If the sample rates differ, this case has to be handled differently
- */
- av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
-
- /** Open the resampler with the specified parameters. */
- if ((error = swr_init(*resample_context)) < 0) {
- fprintf(stderr, "Could not open resample context\n");
- swr_free(resample_context);
- return error;
- }
- return 0;
-}
-
-/** Initialize a FIFO buffer for the audio samples to be encoded. */
-static int init_fifo(AVAudioFifo **fifo)
-{
- /** Create the FIFO buffer based on the specified output sample format. */
- if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
- fprintf(stderr, "Could not allocate FIFO\n");
- return AVERROR(ENOMEM);
- }
- return 0;
-}
-
-/** Write the header of the output file container. */
-static int write_output_file_header(AVFormatContext *output_format_context)
-{
- int error;
- if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
- fprintf(stderr, "Could not write output file header (error '%s')\n",
- get_error_text(error));
- return error;
- }
- return 0;
-}
-
-/** Decode one audio frame from the input file. */
-static int decode_audio_frame(AVFrame *frame,
- AVFormatContext *input_format_context,
- AVCodecContext *input_codec_context,
- int *data_present, int *finished)
-{
- /** Packet used for temporary storage. */
- AVPacket input_packet;
- int error;
- init_packet(&input_packet);
-
- /** Read one audio frame from the input file into a temporary packet. */
- if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
- /** If we are at the end of the file, flush the decoder below. */
- if (error == AVERROR_EOF)
- *finished = 1;
- else {
- fprintf(stderr, "Could not read frame (error '%s')\n",
- get_error_text(error));
- return error;
- }
- }
-
- /**
- * Decode the audio frame stored in the temporary packet.
- * The input audio stream decoder is used to do this.
- * If we are at the end of the file, pass an empty packet to the decoder
- * to flush it.
- */
- if ((error = avcodec_decode_audio4(input_codec_context, frame,
- data_present, &input_packet)) < 0) {
- fprintf(stderr, "Could not decode frame (error '%s')\n",
- get_error_text(error));
- av_free_packet(&input_packet);
- return error;
- }
-
- /**
- * If the decoder has not been flushed completely, we are not finished,
- * so that this function has to be called again.
- */
- if (*finished && *data_present)
- *finished = 0;
- av_free_packet(&input_packet);
- return 0;
-}
-
-/**
- * Initialize a temporary storage for the specified number of audio samples.
- * The conversion requires temporary storage due to the different format.
- * The number of audio samples to be allocated is specified in frame_size.
- */
-static int init_converted_samples(uint8_t ***converted_input_samples,
- AVCodecContext *output_codec_context,
- int frame_size)
-{
- int error;
-
- /**
- * Allocate as many pointers as there are audio channels.
- * Each pointer will later point to the audio samples of the corresponding
- * channels (although it may be NULL for interleaved formats).
- */
- if (!(*converted_input_samples = calloc(output_codec_context->channels,
- sizeof(**converted_input_samples)))) {
- fprintf(stderr, "Could not allocate converted input sample pointers\n");
- return AVERROR(ENOMEM);
- }
-
- /**
- * Allocate memory for the samples of all channels in one consecutive
- * block for convenience.
- */
- if ((error = av_samples_alloc(*converted_input_samples, NULL,
- output_codec_context->channels,
- frame_size,
- output_codec_context->sample_fmt, 0)) < 0) {
- fprintf(stderr,
- "Could not allocate converted input samples (error '%s')\n",
- get_error_text(error));
- av_freep(&(*converted_input_samples)[0]);
- free(*converted_input_samples);
- return error;
- }
- return 0;
-}
-
-/**
- * Convert the input audio samples into the output sample format.
- * The conversion happens on a per-frame basis, the size of which is specified
- * by frame_size.
- */
-static int convert_samples(const uint8_t **input_data,
- uint8_t **converted_data, const int frame_size,
- SwrContext *resample_context)
-{
- int error;
-
- /** Convert the samples using the resampler. */
- if ((error = swr_convert(resample_context,
- converted_data, frame_size,
- input_data , frame_size)) < 0) {
- fprintf(stderr, "Could not convert input samples (error '%s')\n",
- get_error_text(error));
- return error;
- }
-
- return 0;
-}
-
-/** Add converted input audio samples to the FIFO buffer for later processing. */
-static int add_samples_to_fifo(AVAudioFifo *fifo,
- uint8_t **converted_input_samples,
- const int frame_size)
-{
- int error;
-
- /**
- * Make the FIFO as large as it needs to be to hold both
- * the old and the new samples.
- */
- if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
- fprintf(stderr, "Could not reallocate FIFO\n");
- return error;
- }
-
- /** Store the new samples in the FIFO buffer. */
- if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
- frame_size) < frame_size) {
- fprintf(stderr, "Could not write data to FIFO\n");
- return AVERROR_EXIT;
- }
- return 0;
-}
-
-/**
- * Read one audio frame from the input file, decode, convert and store
- * it in the FIFO buffer.
- */
-static int read_decode_convert_and_store(AVAudioFifo *fifo,
- AVFormatContext *input_format_context,
- AVCodecContext *input_codec_context,
- AVCodecContext *output_codec_context,
- SwrContext *resampler_context,
- int *finished)
-{
- /** Temporary storage of the input samples of the frame read from the file. */
- AVFrame *input_frame = NULL;
- /** Temporary storage for the converted input samples. */
- uint8_t **converted_input_samples = NULL;
- int data_present;
- int ret = AVERROR_EXIT;
-
- /** Initialize temporary storage for one input frame. */
- if (init_input_frame(&input_frame))
- goto cleanup;
- /** Decode one frame worth of audio samples. */
- if (decode_audio_frame(input_frame, input_format_context,
- input_codec_context, &data_present, finished))
- goto cleanup;
- /**
- * If we are at the end of the file and there are no more samples
- * in the decoder which are delayed, we are actually finished.
- * This must not be treated as an error.
- */
- if (*finished && !data_present) {
- ret = 0;
- goto cleanup;
- }
- /** If there is decoded data, convert and store it */
- if (data_present) {
- /** Initialize the temporary storage for the converted input samples. */
- if (init_converted_samples(&converted_input_samples, output_codec_context,
- input_frame->nb_samples))
- goto cleanup;
-
- /**
- * Convert the input samples to the desired output sample format.
- * This requires a temporary storage provided by converted_input_samples.
- */
- if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
- input_frame->nb_samples, resampler_context))
- goto cleanup;
-
- /** Add the converted input samples to the FIFO buffer for later processing. */
- if (add_samples_to_fifo(fifo, converted_input_samples,
- input_frame->nb_samples))
- goto cleanup;
- ret = 0;
- }
- ret = 0;
-
-cleanup:
- if (converted_input_samples) {
- av_freep(&converted_input_samples[0]);
- free(converted_input_samples);
- }
- av_frame_free(&input_frame);
-
- return ret;
-}
-
-/**
- * Initialize one output frame for writing to the output file.
- * The frame will be exactly frame_size samples large.
- */
-static int init_output_frame(AVFrame **frame,
- AVCodecContext *output_codec_context,
- int frame_size)
-{
- int error;
-
- /** Create a new frame to store the audio samples. */
- if (!(*frame = av_frame_alloc())) {
- fprintf(stderr, "Could not allocate output frame\n");
- return AVERROR_EXIT;
- }
-
- /**
- * Set the frame's parameters, especially its size and format.
- * av_frame_get_buffer needs this to allocate memory for the
- * audio samples of the frame.
- * Default channel layouts based on the number of channels
- * are assumed for simplicity.
- */
- (*frame)->nb_samples = frame_size;
- (*frame)->channel_layout = output_codec_context->channel_layout;
- (*frame)->format = output_codec_context->sample_fmt;
- (*frame)->sample_rate = output_codec_context->sample_rate;
-
- /**
- * Allocate the samples of the created frame. This call will make
- * sure that the audio frame can hold as many samples as specified.
- */
- if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
- fprintf(stderr, "Could allocate output frame samples (error '%s')\n",
- get_error_text(error));
- av_frame_free(frame);
- return error;
- }
-
- return 0;
-}
-
-/** Encode one frame worth of audio to the output file. */
-static int encode_audio_frame(AVFrame *frame,
- AVFormatContext *output_format_context,
- AVCodecContext *output_codec_context,
- int *data_present)
-{
- /** Packet used for temporary storage. */
- AVPacket output_packet;
- int error;
- init_packet(&output_packet);
-
- /**
- * Encode the audio frame and store it in the temporary packet.
- * The output audio stream encoder is used to do this.
- */
- if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
- frame, data_present)) < 0) {
- fprintf(stderr, "Could not encode frame (error '%s')\n",
- get_error_text(error));
- av_free_packet(&output_packet);
- return error;
- }
-
- /** Write one audio frame from the temporary packet to the output file. */
- if (*data_present) {
- if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
- fprintf(stderr, "Could not write frame (error '%s')\n",
- get_error_text(error));
- av_free_packet(&output_packet);
- return error;
- }
-
- av_free_packet(&output_packet);
- }
-
- return 0;
-}
-
-/**
- * Load one audio frame from the FIFO buffer, encode and write it to the
- * output file.
- */
-static int load_encode_and_write(AVAudioFifo *fifo,
- AVFormatContext *output_format_context,
- AVCodecContext *output_codec_context)
-{
- /** Temporary storage of the output samples of the frame written to the file. */
- AVFrame *output_frame;
- /**
- * Use the encoder's maximum frame size. If the FIFO buffer holds fewer
- * samples than that, use the smaller number instead, so the final
- * (possibly short) frame is still encoded.
- */
- const int frame_size = FFMIN(av_audio_fifo_size(fifo),
- output_codec_context->frame_size);
- int data_written;
-
- /** Initialize temporary storage for one output frame. */
- if (init_output_frame(&output_frame, output_codec_context, frame_size))
- return AVERROR_EXIT;
-
- /**
- * Read as many samples from the FIFO buffer as required to fill the frame.
- * The samples are stored in the frame temporarily.
- */
- if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
- fprintf(stderr, "Could not read data from FIFO\n");
- av_frame_free(&output_frame);
- return AVERROR_EXIT;
- }
-
- /** Encode one frame worth of audio samples. */
- if (encode_audio_frame(output_frame, output_format_context,
- output_codec_context, &data_written)) {
- av_frame_free(&output_frame);
- return AVERROR_EXIT;
- }
- av_frame_free(&output_frame);
- return 0;
-}
-
-/** Write the trailer of the output file container. */
-static int write_output_file_trailer(AVFormatContext *output_format_context)
-{
- int error;
- if ((error = av_write_trailer(output_format_context)) < 0) {
- fprintf(stderr, "Could not write output file trailer (error '%s')\n",
- get_error_text(error));
- return error;
- }
- return 0;
-}
-
-/** Convert an audio file to an AAC file in an MP4 container. */
-int main(int argc, char **argv)
-{
- AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
- AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
- SwrContext *resample_context = NULL;
- AVAudioFifo *fifo = NULL;
- int ret = AVERROR_EXIT;
-
- if (argc < 3) {
- fprintf(stderr, "Usage: %s \n", argv[0]);
- exit(1);
- }
-
- /** Register all codecs and formats so that they can be used. */
- av_register_all();
- /** Open the input file for reading. */
- if (open_input_file(argv[1], &input_format_context,
- &input_codec_context))
- goto cleanup;
- /** Open the output file for writing. */
- if (open_output_file(argv[2], input_codec_context,
- &output_format_context, &output_codec_context))
- goto cleanup;
- /** Initialize the resampler to be able to convert audio sample formats. */
- if (init_resampler(input_codec_context, output_codec_context,
- &resample_context))
- goto cleanup;
- /** Initialize the FIFO buffer to store audio samples to be encoded. */
- if (init_fifo(&fifo))
- goto cleanup;
- /** Write the header of the output file container. */
- if (write_output_file_header(output_format_context))
- goto cleanup;
-
- /**
- * Loop as long as we have input samples to read or output samples
- * to write; abort as soon as we have neither.
- */
- while (1) {
- /** Use the encoder's desired frame size for processing. */
- const int output_frame_size = output_codec_context->frame_size;
- int finished = 0;
-
- /**
- * Make sure that there is one frame worth of samples in the FIFO
- * buffer so that the encoder can do its work.
- * Since the decoder's and the encoder's frame size may differ, we
- * need the FIFO buffer to store as many frames worth of input samples
- * as it takes to make up at least one frame worth of output samples.
- */
- while (av_audio_fifo_size(fifo) < output_frame_size) {
- /**
- * Decode one frame worth of audio samples, convert it to the
- * output sample format and put it into the FIFO buffer.
- */
- if (read_decode_convert_and_store(fifo, input_format_context,
- input_codec_context,
- output_codec_context,
- resample_context, &finished))
- goto cleanup;
-
- /**
- * If we are at the end of the input file, we continue
- * encoding the remaining audio samples to the output file.
- */
- if (finished)
- break;
- }
-
- /**
- * If we have enough samples for the encoder, we encode them.
- * At the end of the file, we pass the remaining samples to
- * the encoder.
- */
- while (av_audio_fifo_size(fifo) >= output_frame_size ||
- (finished && av_audio_fifo_size(fifo) > 0))
- /**
- * Take one frame worth of audio samples from the FIFO buffer,
- * encode it and write it to the output file.
- */
- if (load_encode_and_write(fifo, output_format_context,
- output_codec_context))
- goto cleanup;
-
- /**
- * If we are at the end of the input file and have encoded
- * all remaining samples, we can exit this loop and finish.
- */
- if (finished) {
- int data_written;
- /** Flush the encoder as it may have delayed frames. */
- do {
- if (encode_audio_frame(NULL, output_format_context,
- output_codec_context, &data_written))
- goto cleanup;
- } while (data_written);
- break;
- }
- }
-
- /** Write the trailer of the output file container. */
- if (write_output_file_trailer(output_format_context))
- goto cleanup;
- ret = 0;
-
-cleanup:
- if (fifo)
- av_audio_fifo_free(fifo);
- swr_free(&resample_context);
- if (output_codec_context)
- avcodec_close(output_codec_context);
- if (output_format_context) {
- avio_closep(&output_format_context->pb);
- avformat_free_context(output_format_context);
- }
- if (input_codec_context)
- avcodec_close(input_codec_context);
- if (input_format_context)
- avformat_close_input(&input_format_context);
-
- return ret;
-}
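The audio FIFO is what lets the decoder's and the encoder's frame sizes differ: encoding starts only once a full encoder frame has accumulated, except at end of input, where the remaining short frame is flushed as well. That condition, pulled out into a helper for illustration (the helper is not part of the example):

#include <libavutil/audio_fifo.h>

/* Nonzero when enough samples are buffered to feed the encoder, or when the
 * input has ended and the remaining (possibly short) frame must be encoded. */
static int fifo_ready_for_encoding(AVAudioFifo *fifo, int encoder_frame_size,
                                   int input_finished)
{
    int buffered = av_audio_fifo_size(fifo);
    return buffered >= encoder_frame_size || (input_finished && buffered > 0);
}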
diff --git a/Externals/ffmpeg/shared/doc/examples/transcoding.c b/Externals/ffmpeg/shared/doc/examples/transcoding.c
deleted file mode 100644
index 2a8220eefa..0000000000
--- a/Externals/ffmpeg/shared/doc/examples/transcoding.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * Copyright (c) 2010 Nicolas George
- * Copyright (c) 2011 Stefano Sabatini
- * Copyright (c) 2014 Andrey Utkin
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * API example for demuxing, decoding, filtering, encoding and muxing
- * @example transcoding.c
- */
-
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavfilter/avfiltergraph.h>
-#include <libavfilter/avcodec.h>
-#include <libavfilter/buffersink.h>
-#include <libavfilter/buffersrc.h>
-#include <libavutil/opt.h>
-#include <libavutil/pixdesc.h>
-
-static AVFormatContext *ifmt_ctx;
-static AVFormatContext *ofmt_ctx;
-typedef struct FilteringContext {
- AVFilterContext *buffersink_ctx;
- AVFilterContext *buffersrc_ctx;
- AVFilterGraph *filter_graph;
-} FilteringContext;
-static FilteringContext *filter_ctx;
-
-static int open_input_file(const char *filename)
-{
- int ret;
- unsigned int i;
-
- ifmt_ctx = NULL;
- if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
- return ret;
- }
-
- if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
- return ret;
- }
-
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- AVStream *stream;
- AVCodecContext *codec_ctx;
- stream = ifmt_ctx->streams[i];
- codec_ctx = stream->codec;
- /* Reencode video & audio and remux subtitles etc. */
- if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
- || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- /* Open decoder */
- ret = avcodec_open2(codec_ctx,
- avcodec_find_decoder(codec_ctx->codec_id), NULL);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
- return ret;
- }
- }
- }
-
- av_dump_format(ifmt_ctx, 0, filename, 0);
- return 0;
-}
-
-static int open_output_file(const char *filename)
-{
- AVStream *out_stream;
- AVStream *in_stream;
- AVCodecContext *dec_ctx, *enc_ctx;
- AVCodec *encoder;
- int ret;
- unsigned int i;
-
- ofmt_ctx = NULL;
- avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
- if (!ofmt_ctx) {
- av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
- return AVERROR_UNKNOWN;
- }
-
-
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- out_stream = avformat_new_stream(ofmt_ctx, NULL);
- if (!out_stream) {
- av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
- return AVERROR_UNKNOWN;
- }
-
- in_stream = ifmt_ctx->streams[i];
- dec_ctx = in_stream->codec;
- enc_ctx = out_stream->codec;
-
- if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
- || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- /* in this example, we choose transcoding to same codec */
- encoder = avcodec_find_encoder(dec_ctx->codec_id);
- if (!encoder) {
- av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
- return AVERROR_INVALIDDATA;
- }
-
- /* In this example, we transcode to same properties (picture size,
- * sample rate etc.). These properties can be changed for output
- * streams easily using filters */
- if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- enc_ctx->height = dec_ctx->height;
- enc_ctx->width = dec_ctx->width;
- enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
- /* take first format from list of supported formats */
- enc_ctx->pix_fmt = encoder->pix_fmts[0];
- /* video time_base can be set to whatever is handy and supported by encoder */
- enc_ctx->time_base = dec_ctx->time_base;
- } else {
- enc_ctx->sample_rate = dec_ctx->sample_rate;
- enc_ctx->channel_layout = dec_ctx->channel_layout;
- enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
- /* take first format from list of supported formats */
- enc_ctx->sample_fmt = encoder->sample_fmts[0];
- enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
- }
-
- /* Third parameter can be used to pass settings to encoder */
- ret = avcodec_open2(enc_ctx, encoder, NULL);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
- return ret;
- }
- } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
- av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
- return AVERROR_INVALIDDATA;
- } else {
- /* if this stream must be remuxed */
- ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
- ifmt_ctx->streams[i]->codec);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
- return ret;
- }
- }
-
- if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
- enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- }
- av_dump_format(ofmt_ctx, 0, filename, 1);
-
- if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
- ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
- return ret;
- }
- }
-
- /* init muxer, write output file header */
- ret = avformat_write_header(ofmt_ctx, NULL);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
- return ret;
- }
-
- return 0;
-}
-
-static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
- AVCodecContext *enc_ctx, const char *filter_spec)
-{
- char args[512];
- int ret = 0;
- AVFilter *buffersrc = NULL;
- AVFilter *buffersink = NULL;
- AVFilterContext *buffersrc_ctx = NULL;
- AVFilterContext *buffersink_ctx = NULL;
- AVFilterInOut *outputs = avfilter_inout_alloc();
- AVFilterInOut *inputs = avfilter_inout_alloc();
- AVFilterGraph *filter_graph = avfilter_graph_alloc();
-
- if (!outputs || !inputs || !filter_graph) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- buffersrc = avfilter_get_by_name("buffer");
- buffersink = avfilter_get_by_name("buffersink");
- if (!buffersrc || !buffersink) {
- av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
- ret = AVERROR_UNKNOWN;
- goto end;
- }
-
- snprintf(args, sizeof(args),
- "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
- dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
- dec_ctx->time_base.num, dec_ctx->time_base.den,
- dec_ctx->sample_aspect_ratio.num,
- dec_ctx->sample_aspect_ratio.den);
-
- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
- args, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
- goto end;
- }
-
- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
- NULL, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
- goto end;
- }
-
- ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
- (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
- AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
- goto end;
- }
- } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- buffersrc = avfilter_get_by_name("abuffer");
- buffersink = avfilter_get_by_name("abuffersink");
- if (!buffersrc || !buffersink) {
- av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
- ret = AVERROR_UNKNOWN;
- goto end;
- }
-
- if (!dec_ctx->channel_layout)
- dec_ctx->channel_layout =
- av_get_default_channel_layout(dec_ctx->channels);
- snprintf(args, sizeof(args),
- "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
- dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
- av_get_sample_fmt_name(dec_ctx->sample_fmt),
- dec_ctx->channel_layout);
- ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
- args, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
- goto end;
- }
-
- ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
- NULL, NULL, filter_graph);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
- goto end;
- }
-
- ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
- (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
- AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
- goto end;
- }
-
- ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
- (uint8_t*)&enc_ctx->channel_layout,
- sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
- goto end;
- }
-
- ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
- (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
- AV_OPT_SEARCH_CHILDREN);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
- goto end;
- }
- } else {
- ret = AVERROR_UNKNOWN;
- goto end;
- }
-
- /* Endpoints for the filter graph. */
- outputs->name = av_strdup("in");
- outputs->filter_ctx = buffersrc_ctx;
- outputs->pad_idx = 0;
- outputs->next = NULL;
-
- inputs->name = av_strdup("out");
- inputs->filter_ctx = buffersink_ctx;
- inputs->pad_idx = 0;
- inputs->next = NULL;
-
- if (!outputs->name || !inputs->name) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
- &inputs, &outputs, NULL)) < 0)
- goto end;
-
- if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
- goto end;
-
- /* Fill FilteringContext */
- fctx->buffersrc_ctx = buffersrc_ctx;
- fctx->buffersink_ctx = buffersink_ctx;
- fctx->filter_graph = filter_graph;
-
-end:
- avfilter_inout_free(&inputs);
- avfilter_inout_free(&outputs);
-
- return ret;
-}
-
-static int init_filters(void)
-{
- const char *filter_spec;
- unsigned int i;
- int ret;
- filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
- if (!filter_ctx)
- return AVERROR(ENOMEM);
-
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- filter_ctx[i].buffersrc_ctx = NULL;
- filter_ctx[i].buffersink_ctx = NULL;
- filter_ctx[i].filter_graph = NULL;
- if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
- || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
- continue;
-
-
- if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
- filter_spec = "null"; /* passthrough (dummy) filter for video */
- else
- filter_spec = "anull"; /* passthrough (dummy) filter for audio */
- ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
- ofmt_ctx->streams[i]->codec, filter_spec);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
- int ret;
- int got_frame_local;
- AVPacket enc_pkt;
- int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
- (ifmt_ctx->streams[stream_index]->codec->codec_type ==
- AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
-
- if (!got_frame)
- got_frame = &got_frame_local;
-
- av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
- /* encode filtered frame */
- enc_pkt.data = NULL;
- enc_pkt.size = 0;
- av_init_packet(&enc_pkt);
- ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
- filt_frame, got_frame);
- av_frame_free(&filt_frame);
- if (ret < 0)
- return ret;
- if (!(*got_frame))
- return 0;
-
- /* prepare packet for muxing */
- enc_pkt.stream_index = stream_index;
- av_packet_rescale_ts(&enc_pkt,
- ofmt_ctx->streams[stream_index]->codec->time_base,
- ofmt_ctx->streams[stream_index]->time_base);
-
- av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
- /* mux encoded frame */
- ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
- return ret;
-}
-
-static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
-{
- int ret;
- AVFrame *filt_frame;
-
- av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
- /* push the decoded frame into the filtergraph */
- ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
- frame, 0);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
- return ret;
- }
-
- /* pull filtered frames from the filtergraph */
- while (1) {
- filt_frame = av_frame_alloc();
- if (!filt_frame) {
- ret = AVERROR(ENOMEM);
- break;
- }
- av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
- ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
- filt_frame);
- if (ret < 0) {
- /* if no more frames for output - returns AVERROR(EAGAIN)
- * if flushed and no more frames for output - returns AVERROR_EOF
- * rewrite retcode to 0 to show it as normal procedure completion
- */
- if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
- ret = 0;
- av_frame_free(&filt_frame);
- break;
- }
-
- filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
- ret = encode_write_frame(filt_frame, stream_index, NULL);
- if (ret < 0)
- break;
- }
-
- return ret;
-}
-
-static int flush_encoder(unsigned int stream_index)
-{
- int ret;
- int got_frame;
-
- if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
- CODEC_CAP_DELAY))
- return 0;
-
- while (1) {
- av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
- ret = encode_write_frame(NULL, stream_index, &got_frame);
- if (ret < 0)
- break;
- if (!got_frame)
- return 0;
- }
- return ret;
-}
-
-int main(int argc, char **argv)
-{
- int ret;
- AVPacket packet = { .data = NULL, .size = 0 };
- AVFrame *frame = NULL;
- enum AVMediaType type;
- unsigned int stream_index;
- unsigned int i;
- int got_frame;
- int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
-
- if (argc != 3) {
- av_log(NULL, AV_LOG_ERROR, "Usage: %s \n", argv[0]);
- return 1;
- }
-
- av_register_all();
- avfilter_register_all();
-
- if ((ret = open_input_file(argv[1])) < 0)
- goto end;
- if ((ret = open_output_file(argv[2])) < 0)
- goto end;
- if ((ret = init_filters()) < 0)
- goto end;
-
- /* read all packets */
- while (1) {
- if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
- break;
- stream_index = packet.stream_index;
- type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
- av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
- stream_index);
-
- if (filter_ctx[stream_index].filter_graph) {
- av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
- frame = av_frame_alloc();
- if (!frame) {
- ret = AVERROR(ENOMEM);
- break;
- }
- av_packet_rescale_ts(&packet,
- ifmt_ctx->streams[stream_index]->time_base,
- ifmt_ctx->streams[stream_index]->codec->time_base);
- dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
- avcodec_decode_audio4;
- ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
- &got_frame, &packet);
- if (ret < 0) {
- av_frame_free(&frame);
- av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
- break;
- }
-
- if (got_frame) {
- frame->pts = av_frame_get_best_effort_timestamp(frame);
- ret = filter_encode_write_frame(frame, stream_index);
- av_frame_free(&frame);
- if (ret < 0)
- goto end;
- } else {
- av_frame_free(&frame);
- }
- } else {
- /* remux this frame without reencoding */
- av_packet_rescale_ts(&packet,
- ifmt_ctx->streams[stream_index]->time_base,
- ofmt_ctx->streams[stream_index]->time_base);
-
- ret = av_interleaved_write_frame(ofmt_ctx, &packet);
- if (ret < 0)
- goto end;
- }
- av_free_packet(&packet);
- }
-
- /* flush filters and encoders */
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- /* flush filter */
- if (!filter_ctx[i].filter_graph)
- continue;
- ret = filter_encode_write_frame(NULL, i);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
- goto end;
- }
-
- /* flush encoder */
- ret = flush_encoder(i);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
- goto end;
- }
- }
-
- av_write_trailer(ofmt_ctx);
-end:
- av_free_packet(&packet);
- av_frame_free(&frame);
- for (i = 0; i < ifmt_ctx->nb_streams; i++) {
- avcodec_close(ifmt_ctx->streams[i]->codec);
- if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
- avcodec_close(ofmt_ctx->streams[i]->codec);
- if (filter_ctx && filter_ctx[i].filter_graph)
- avfilter_graph_free(&filter_ctx[i].filter_graph);
- }
- av_free(filter_ctx);
- avformat_close_input(&ifmt_ctx);
- if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
- avio_closep(&ofmt_ctx->pb);
- avformat_free_context(ofmt_ctx);
-
- if (ret < 0)
- av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
-
- return ret ? 1 : 0;
-}
diff --git a/Externals/ffmpeg/shared/doc/faq.html b/Externals/ffmpeg/shared/doc/faq.html
deleted file mode 100644
index be6ea62995..0000000000
--- a/Externals/ffmpeg/shared/doc/faq.html
+++ /dev/null
@@ -1,719 +0,0 @@
-
-
-
-
-
-
- FFmpeg FAQ
-
-
-
-
-
-
-
-
- FFmpeg FAQ
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 General Questions# TOC
-
-
-
1.1 Why doesn’t FFmpeg support feature [xyz]?# TOC
-
-
Because no one has taken on that task yet. FFmpeg development is
-driven by the tasks that are important to the individual developers.
-If there is a feature that is important to you, the best way to get
-it implemented is to undertake the task yourself or sponsor a developer.
-
-
-
1.2 FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?# TOC
-
-
No. Windows DLLs are not portable; they are also bloated and often slow.
-Moreover, FFmpeg strives to support all codecs natively.
-A DLL loader is not conducive to that goal.
-
-
-
1.3 I cannot read this file although this format seems to be supported by ffmpeg.# TOC
-
-
Even if ffmpeg can read the container format, it may not support all its
-codecs. Please consult the supported codec list in the ffmpeg
-documentation.
-
-
-
1.4 Which codecs are supported by Windows?# TOC
-
-
Windows does not support standard formats like MPEG very well, unless you
-install some additional codecs.
-
-
The following list of video codecs should work on most Windows systems:
-
-msmpeg4v2
-.avi/.asf
-
-msmpeg4
-.asf only
-
-wmv1
-.asf only
-
-wmv2
-.asf only
-
-mpeg4
-Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
-
-mpeg1video
-.mpg only
-
-
-
Note, ASF files often have .wmv or .wma extensions in Windows. It should also
-be mentioned that Microsoft claims a patent on the ASF format, and may sue
-or threaten users who create ASF files with non-Microsoft software. It is
-strongly advised to avoid ASF where possible.
-
-
The following list of audio codecs should work on most Windows systems:
-
-adpcm_ima_wav
-adpcm_ms
-pcm_s16le
-always
-
-libmp3lame
-If some MP3 codec like LAME is installed.
-
-
-
-
-
-
2 Compilation# TOC
-
-
-
2.1 error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'# TOC
-
-
This is a bug in gcc. Do not report it to us. Instead, please report it to
-the gcc developers. Note that we will not add workarounds for gcc bugs.
-
-
Also note that (some of) the gcc developers believe this is not a bug or
-not a bug they should fix:
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203 .
-Then again, some of them do not know the difference between an undecidable
-problem and an NP-hard problem...
-
-
-
2.2 I have installed this library with my distro’s package manager. Why does configure not see it?# TOC
-
-
Distributions usually split libraries in several packages. The main package
-contains the files necessary to run programs using the library. The
-development package contains the files necessary to build programs using the
-library. Sometimes, docs and/or data are in a separate package too.
-
-
To build FFmpeg, you need to install the development package. It is usually
-called libfoo-dev or libfoo-devel . You can remove it after the
-build is finished, but be sure to keep the main package.
-
-
-
2.3 How do I make pkg-config find my libraries?# TOC
-
-
Somewhere along with your libraries, there is a .pc file (or several)
-in a pkgconfig directory. You need to set environment variables to
-point pkg-config to these files.
-
-
If you need to add directories to pkg-config’s search list
-(typical use case: library installed separately), add it to
-$PKG_CONFIG_PATH:
-
-
-
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
-
-
-
If you need to replace pkg-config’s search list
-(typical use case: cross-compiling), set it in
-$PKG_CONFIG_LIBDIR:
-
-
-
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
-
-
-
If you need to know the library’s internal dependencies (typical use: static
-linking), add the --static option to pkg-config:
-
-
-
./configure --pkg-config-flags=--static
-
-
-
-
2.4 How do I use pkg-config when cross-compiling?# TOC
-
-
The best way is to install pkg-config
in your cross-compilation
-environment. It will automatically use the cross-compilation libraries.
-
-
You can also use pkg-config
from the host environment by
-specifying explicitly --pkg-config=pkg-config
to configure
.
-In that case, you must point pkg-config
to the correct directories
-using the PKG_CONFIG_LIBDIR
, as explained in the previous entry.
-
-
As an intermediate solution, you can place in your cross-compilation
-environment a script that calls the host pkg-config with
-PKG_CONFIG_LIBDIR set. That script can look like this:
-
-
-
#!/bin/sh
-PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
-export PKG_CONFIG_LIBDIR
-exec /usr/bin/pkg-config "$@"
-
-
-
-
-
-
-
3.1 ffmpeg does not work; what is wrong?# TOC
-
-
Try a make distclean in the ffmpeg source directory before the build.
-If this does not help, see (http://ffmpeg.org/bugreports.html ).
-
-
-
3.2 How do I encode single pictures into movies?# TOC
-
-
First, rename your pictures to follow a numerical sequence.
-For example, img1.jpg, img2.jpg, img3.jpg,...
-Then you may run:
-
-
-
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
-
-
-
Notice that ‘%d ’ is replaced by the image number.
-
-
img%03d.jpg means the sequence img001.jpg , img002.jpg , etc.
-
-
Use the -start_number option to declare a starting number for
-the sequence. This is useful if your sequence does not start with
-img001.jpg but is still in a numerical order. The following
-example will start with img100.jpg :
-
-
-
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
-
-
-
If you have a large number of pictures to rename, you can use the
-following command to ease the burden. The command, using Bourne
-shell syntax, symbolically links all files in the current directory
-that match *jpg
to the /tmp directory in the sequence of
-img001.jpg , img002.jpg and so on.
-
-
-
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
-
-
-
If you want to sequence them by oldest modified first, substitute
-$(ls -r -t *jpg)
in place of *jpg
.
-
-
Then run:
-
-
-
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
-
-
-
The same logic is used for any image format that ffmpeg reads.
-
-
You can also use cat
to pipe images to ffmpeg:
-
-
-
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
-
-
-
-
3.3 How do I encode movie to single pictures?# TOC
-
-
Use:
-
-
-
ffmpeg -i movie.mpg movie%d.jpg
-
-
-
The movie.mpg used as input will be converted to
-movie1.jpg , movie2.jpg , etc...
-
-
Instead of relying on file format self-recognition, you may also use
-
--c:v ppm
--c:v png
--c:v mjpeg
-
-
to force the encoding.
-
-
Applying that to the previous example:
-
-
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
-
-
-
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
-
-
-
3.4 Why do I see a slight quality degradation with multithreaded MPEG* encoding?# TOC
-
-
For multithreaded MPEG* encoding, the encoded slices must be independent,
-otherwise thread n would practically have to wait for n-1 to finish, so it’s
-quite logical that there is a small reduction of quality. This is not a bug.
-
-
-
3.5 How can I read from the standard input or write to the standard output?# TOC
-
-
Use - as file name.
-
-
-
3.6 -f jpeg doesn’t work.# TOC
-
-
Try ’-f image2 test%d.jpg’.
-
-
-
3.7 Why can I not change the frame rate?# TOC
-
-
Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
-Choose a different codec with the -c:v command line option.
-
-
-
3.8 How do I encode Xvid or DivX video with ffmpeg?# TOC
-
-
Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
-standard (note that there are many other coding formats that use this
-same standard). Thus, use ’-c:v mpeg4’ to encode in these formats. The
-default fourcc stored in an MPEG-4-coded file will be ’FMP4’. If you want
-a different fourcc, use the ’-vtag’ option. E.g., ’-vtag xvid’ will
-force the fourcc ’xvid’ to be stored as the video fourcc rather than the
-default.
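-
For example (a sketch only; the file names are placeholders):
-
ffmpeg -i input.avi -c:v mpeg4 -vtag xvid output.avi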
-
-
-
3.9 Which are good parameters for encoding high quality MPEG-4?# TOC
-
-
’-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2’,
-things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
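-
Assembled into a two-pass run, this could look like the following sketch
-(the bitrate and file names are illustrative only, not a recommendation):
-
ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -bf 2 -b:v 1M -pass 1 -an -f rawvideo -y /dev/null
-ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -bf 2 -b:v 1M -pass 2 output.avi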
-
-
-
3.10 Which are good parameters for encoding high quality MPEG-1/MPEG-2?# TOC
-
-
’-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2’
-but beware that ’-g 100’ might cause problems with some decoders.
-Things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
-
-
-
3.11 Interlaced video looks very bad when encoded with ffmpeg, what is wrong?# TOC
-
-
You should use ’-flags +ilme+ildct’ and maybe ’-flags +alt’ for interlaced
-material, and try ’-top 0/1’ if the result looks really messed-up.
-
-
-
3.12 How can I read DirectShow files?# TOC
-
-
If you have built FFmpeg with ./configure --enable-avisynth
-(only possible on MinGW/Cygwin platforms),
-then you may use any file that DirectShow can read as input.
-
-
Just create an "input.avs" text file with this single line ...
-
-
DirectShowSource("C:\path to your file\yourfile.asf")
-
-
... and then feed that text file to ffmpeg:
-
ffmpeg -i input.avs
-
For ANY other help on AviSynth, please visit the
-AviSynth homepage .
-
-
-
3.13 How can I join video files?# TOC
-
-
To "join" video files is quite ambiguous. The following list explains the
-different kinds of "joining" and points out how those are addressed in
-FFmpeg. To join video files may mean:
-
-
- To put them one after the other: this is called to concatenate them
-(in short: concat) and is addressed
-in this very faq .
-
- To put them together in the same file, to let the user choose between the
-different versions (example: different audio languages): this is called to
-multiplex them together (in short: mux), and is done by simply
-invoking ffmpeg with several -i options.
-
- For audio, to put all channels together in a single stream (example: two
-mono streams into one stereo stream): this is sometimes called to
-merge them, and can be done using the
-amerge
filter.
-
- For audio, to play one on top of the other: this is called to mix
-them, and can be done by first merging them into a single stream and then
-using the pan
filter to mix
-the channels at will.
-
- For video, to display both together, side by side or one on top of a part of
-the other; it can be done using the
-overlay
video filter.
-
-
-
-
-
3.14 How can I concatenate video files?# TOC
-
-
There are several solutions, depending on the exact circumstances.
-
-
-
3.14.1 Concatenating using the concat filter # TOC
-
-
FFmpeg has a concat
filter designed specifically for that, with examples in the
-documentation. This operation is recommended if you need to re-encode.
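-
A sketch of such an invocation, assuming both inputs have one video and one
-audio stream with matching parameters (file names are placeholders):
-
ffmpeg -i input1.mp4 -i input2.mp4 \
- -filter_complex "[0:v][0:a][1:v][1:a]concat=n=2:v=1:a=1[v][a]" \
- -map "[v]" -map "[a]" output.mp4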
-
-
-
3.14.2 Concatenating using the concat demuxer # TOC
-
-
FFmpeg has a concat
demuxer which you can use when you want to avoid a re-encode and
-your format doesn’t support file level concatenation.
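-
A minimal sketch (file names are placeholders): put one line of the form
-file 'input1.mp4' per input into a text file, say mylist.txt , then run:
-
ffmpeg -f concat -i mylist.txt -c copy output.mp4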
-
-
-
3.14.3 Concatenating using the concat protocol (file level)# TOC
-
-
FFmpeg has a concat
protocol designed specifically for that, with examples in the
-documentation.
-
-
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow video to be
-concatenated by merely concatenating the files containing them.
-
-
Hence you may concatenate your multimedia files by first transcoding them to
-these privileged formats, then using the humble cat
command (or the
-equally humble copy
under Windows), and finally transcoding back to your
-format of choice.
-
-
-
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
-ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
-cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
-ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
-
-
-
Additionally, you can use the concat
protocol instead of cat
or
-copy
which will avoid creation of a potentially huge intermediate file.
-
-
-
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
-ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
-ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
-ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
-
-
-
Note that you may need to escape the character "|" which is special for many
-shells.
-
-
Another option is usage of named pipes, should your platform support it:
-
-
-
mkfifo intermediate1.mpg
-mkfifo intermediate2.mpg
-ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
-ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
-cat intermediate1.mpg intermediate2.mpg |\
-ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
-
-
-
-
3.14.4 Concatenating using raw audio and video# TOC
-
-
Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also
-allow concatenation, and the transcoding step is almost lossless.
-When using multiple yuv4mpegpipe(s), the first line needs to be discarded
-from all but the first stream. This can be accomplished by piping through
-tail
as seen below. Note that when piping through tail
you
-must use command grouping, { ;}
, to background properly.
-
-
For example, let’s say we want to concatenate two FLV files into an
-output.flv file:
-
-
-
mkfifo temp1.a
-mkfifo temp1.v
-mkfifo temp2.a
-mkfifo temp2.v
-mkfifo all.a
-mkfifo all.v
-ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
-ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
-ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
-{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; } &
-cat temp1.a temp2.a > all.a &
-cat temp1.v temp2.v > all.v &
-ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
- -f yuv4mpegpipe -i all.v \
- -y output.flv
-rm temp[12].[av] all.[av]
-
-
-
-
3.15 Using -f lavfi , audio becomes mono for no apparent reason.# TOC
-
-
Use -dumpgraph - to find out exactly where the channel layout is
-lost.
-
-
Most likely, it is through auto-inserted aresample
. Try to understand
-why the converting filter was needed at that place.
-
-
Just before the output is a likely place, as -f lavfi currently
-only supports packed S16.
-
-
Then insert the correct aformat
explicitly in the filtergraph,
-specifying the exact format.
-
-
-
aformat=sample_fmts=s16:channel_layouts=stereo
-
-
-
-
3.16 Why does FFmpeg not see the subtitles in my VOB file?# TOC
-
-
VOB and a few other formats do not have a global header that describes
-everything present in the file. Instead, applications are supposed to scan
-the file to see what it contains. Since VOB files are frequently large, only
-the beginning is scanned. If the subtitles happen only later in the file,
-they will not be initially detected.
-
-
Some applications, including the ffmpeg
command-line tool, can only
-work with streams that were detected during the initial scan; streams that
-are detected later are ignored.
-
-
The size of the initial scan is controlled by two options: probesize
-(default ~5 MB) and analyzeduration
(default 5,000,000 µs = 5 s). For
-the subtitle stream to be detected, both values must be large enough.
-
-
-
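For example, a larger scan window can be requested on the command line; the
-values below (bytes and microseconds) and the output handling are only an
-illustration:
-
ffmpeg -probesize 50000000 -analyzeduration 10000000 -i input.vob -map 0 -c copy output.mkv
-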
3.17 Why was the ffmpeg -sameq option removed? What to use instead?# TOC
-
-
The -sameq option meant "same quantizer", and made sense only in a
-very limited set of cases. Unfortunately, a lot of people mistook it for
-"same quality" and used it in places where it did not make sense: it had
-roughly the expected visible effect, but achieved it in a very inefficient
-way.
-
-
Each encoder has its own set of options to set the quality-vs-size balance,
-use the options for the encoder you are using to set the quality level to a
-point acceptable for your tastes. The most common options to do that are
--qscale and -qmax , but you should peruse the documentation
-of the encoder you chose.
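-
For example, with the native MPEG-4 encoder one might use (the value is
-illustrative; lower means better quality):
-
ffmpeg -i input.avi -c:v mpeg4 -qscale:v 3 output.avi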
-
-
-
4 Development# TOC
-
-
-
4.1 Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?# TOC
-
-
Yes. Check the doc/examples directory in the source
-repository, also available online at:
-https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples .
-
-
Examples are also installed by default, usually in
-$PREFIX/share/ffmpeg/examples
.
-
-
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
-examine the source code for one of the many open source projects that
-already incorporate FFmpeg at (projects.html ).
-
-
-
4.2 Can you support my C compiler XXX?# TOC
-
-
It depends. If your compiler is C99-compliant, then patches to support
-it are likely to be welcome if they do not pollute the source code
-with #ifdef
s related to the compiler.
-
-
-
4.3 Is Microsoft Visual C++ supported?# TOC
-
-
Yes. Please see the Microsoft Visual C++
-section in the FFmpeg documentation.
-
-
-
4.4 Can you add automake, libtool or autoconf support?# TOC
-
-
No. These tools are too bloated and they complicate the build.
-
-
-
4.5 Why not rewrite FFmpeg in object-oriented C++?# TOC
-
-
FFmpeg is already organized in a highly modular manner and does not need to
-be rewritten in a formal object language. Further, many of the developers
-favor straight C; it works for them. For more arguments on this matter,
-read "Programming Religion" .
-
-
-
4.6 Why are the ffmpeg programs devoid of debugging symbols?# TOC
-
-
The build process creates ffmpeg_g
, ffplay_g
, etc. which
-contain full debug information. Those binaries are stripped to create
-ffmpeg
, ffplay
, etc. If you need the debug information, use
-the *_g versions.
-
-
-
4.7 I do not like the LGPL, can I contribute code under the GPL instead?# TOC
-
-
Yes, as long as the code is optional and can easily and cleanly be placed
-under #if CONFIG_GPL without breaking anything. So, for example, a new codec
-or filter would be OK under GPL while a bug fix to LGPL code would not.
-
-
-
4.8 I’m using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.# TOC
-
-
FFmpeg builds static libraries by default. In static libraries, dependencies
-are not handled. That has two consequences. First, you must specify the
-libraries in dependency order: -lavdevice
must come before
--lavformat
, -lavutil
must come after everything else, etc.
-Second, external libraries that are used in FFmpeg have to be specified too.
-
-
An easy way to get the full list of required libraries in dependency order
-is to use pkg-config
.
-
-
-
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
-
-
-
See doc/example/Makefile and doc/example/pc-uninstalled for
-more details.
-
-
-
4.9 I’m using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.# TOC
-
-
FFmpeg is a pure C project, so to use the libraries within your C++ application
-you need to explicitly state that you are using a C library. You can do this by
-encompassing your FFmpeg includes using extern "C"
.
-
-
See http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3
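-
For example (a minimal sketch; which headers you include depends on your
-application):
-
extern "C" {
-#include <libavformat/avformat.h>
-#include <libavcodec/avcodec.h>
-}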
-
-
-
4.10 I’m using libavutil from within my C++ application but the compiler complains about ’UINT64_C’ was not declared in this scope# TOC
-
-
FFmpeg is a pure C project using C99 math features. In order to enable C++
-to use them, you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS.
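-
For example (an illustrative compile line; the file name and the use of
-pkg-config are assumptions):
-
g++ -D__STDC_CONSTANT_MACROS -c myapp.cpp $(pkg-config --cflags libavutil)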
-
-
-
4.11 I have a file in memory / an API different from *open/*read/ libc, how do I use it with libavformat?# TOC
-
-
You have to create a custom AVIOContext using avio_alloc_context
,
-see libavformat/aviobuf.c in FFmpeg and libmpdemux/demux_lavf.c in MPlayer or MPlayer2 sources.
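-
A minimal sketch of the idea, assuming the data is already in memory; the
-MemoryReader struct, the buffer size and the omitted error handling are
-illustrative only:
-
#include <string.h>
-#include <libavformat/avformat.h>
-#include <libavutil/mem.h>
-
-struct MemoryReader {                 /* hypothetical in-memory source */
-    const uint8_t *data;
-    size_t size, pos;
-};
-
-static int read_mem(void *opaque, uint8_t *buf, int buf_size)
-{
-    struct MemoryReader *r = opaque;
-    size_t left = r->size - r->pos;
-    if (!left)
-        return AVERROR_EOF;           /* no more data to give */
-    if ((size_t)buf_size > left)
-        buf_size = (int)left;
-    memcpy(buf, r->data + r->pos, buf_size);
-    r->pos += buf_size;
-    return buf_size;
-}
-
-static AVFormatContext *open_from_memory(struct MemoryReader *r)
-{
-    unsigned char *iobuf = av_malloc(4096);   /* I/O buffer, owned by the AVIOContext */
-    AVIOContext *avio = avio_alloc_context(iobuf, 4096, 0, r, read_mem, NULL, NULL);
-    AVFormatContext *fmt = avformat_alloc_context();
-    fmt->pb = avio;                           /* plug in the custom I/O */
-    if (avformat_open_input(&fmt, NULL, NULL, NULL) < 0)  /* no file name needed */
-        return NULL;                          /* cleanup omitted for brevity */
-    return fmt;
-}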
-
-
-
4.12 Where is the documentation about ffv1, msmpeg4, asv1, 4xm?# TOC
-
-
see http://www.ffmpeg.org/~michael/
-
-
-
4.13 How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?# TOC
-
-
Although peculiar, since it is network oriented, RTP is a container like any
-other. You have to demux RTP before feeding the payload to libavcodec.
-In this specific case please look at RFC 4629 to see how it should be done.
-
-
-
4.14 AVStream.r_frame_rate is wrong, it is much larger than the frame rate.# TOC
-
-
r_frame_rate
is NOT the average frame rate, it is the smallest frame rate
-that can accurately represent all timestamps. So no, it is not
-wrong if it is larger than the average!
-For example, if you have mixed 25 and 30 fps content, then r_frame_rate
-will be 150 (it is the least common multiple).
-If you are looking for the average frame rate, see AVStream.avg_frame_rate
.
-
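For example, a small sketch of reading the average rate (assuming st points
-to a valid AVStream):
-
#include <libavformat/avformat.h>
-
-static double stream_fps(const AVStream *st)
-{
-    /* average frame rate, not the r_frame_rate least common multiple */
-    return av_q2d(st->avg_frame_rate);
-}
-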
-
-
4.15 Why is make fate not running all tests?# TOC
-
-
Make sure you have the fate-suite samples, and that the SAMPLES Make
-variable, the FATE_SAMPLES environment variable, or the --samples
-configure option is set to the right path.
-
-
-
4.16 Why is make fate not finding the samples?# TOC
-
-
Do you happen to have a ~
character in the samples path to indicate a
-home directory? The value is used in ways where the shell cannot expand it,
-causing FATE to not find files. Just replace ~
by the full path.
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/fate.html b/Externals/ffmpeg/shared/doc/fate.html
deleted file mode 100644
index 980d28b756..0000000000
--- a/Externals/ffmpeg/shared/doc/fate.html
+++ /dev/null
@@ -1,286 +0,0 @@
-
-
-
-
-
-
- FFmpeg Automated Testing Environment
-
-
-
-
-
-
-
-
- FFmpeg Automated Testing Environment
-
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Introduction# TOC
-
-
FATE is an extended regression suite on the client-side and a means
-for results aggregation and presentation on the server-side.
-
-
The first part of this document explains how you can use FATE from
-your FFmpeg source directory to test your ffmpeg binary. The second
-part describes how you can run FATE to submit the results to FFmpeg’s
-FATE server.
-
-
In any case, you can have a look at the publicly viewable FATE results
-by visiting this website:
-
-
http://fate.ffmpeg.org/
-
-
This is especially recommended for all people contributing source
-code to FFmpeg, as it shows whether some test on some platform broke
-with their recent contribution. This usually happens on the platforms
-the developers could not test on.
-
-
The second part of this document describes how you can run FATE to
-submit your results to FFmpeg’s FATE server. If you want to submit your
-results be sure to check that your combination of CPU, OS and compiler
-is not already listed on the above mentioned website.
-
-
In the third part you can find a comprehensive listing of FATE makefile
-targets and variables.
-
-
-
-
2 Using FATE from your FFmpeg source directory# TOC
-
-
If you want to run FATE on your machine you need to have the samples
-in place. You can get the samples via the build target fate-rsync.
-Use this command from the top-level source directory:
-
-
-
make fate-rsync SAMPLES=fate-suite/
-make fate SAMPLES=fate-suite/
-
-
-
The above commands set the samples location by passing a makefile
-variable via command line. It is also possible to set the samples
-location at source configuration time by invoking configure with
-‘–samples=<path to the samples directory>’. Afterwards you can
-invoke the makefile targets without setting the SAMPLES makefile
-variable. This is illustrated by the following commands:
-
-
-
./configure --samples=fate-suite/
-make fate-rsync
-make fate
-
-
-
Yet another way to tell FATE about the location of the sample
-directory is by making sure the environment variable FATE_SAMPLES
-contains the path to your samples directory. This can be achieved
-by e.g. putting that variable in your shell profile or by setting
-it in your interactive session.
-
-
-
FATE_SAMPLES=fate-suite/ make fate
-
-
-
-
Do not put a ’~’ character in the samples path to indicate a home
-directory. Because of shell nuances, this will cause FATE to fail.
-
-
To use a custom wrapper to run the test, pass --target-exec to
-configure
or set the TARGET_EXEC Make variable.
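-
For instance, a cross-testing setup might look like this (the qemu invocation
-and library path are assumptions about the target environment):
-
./configure --samples=fate-suite/ --target-exec='qemu-aarch64 -L /usr/aarch64-linux-gnu'
-make fate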
-
-
-
-
3 Submitting the results to the FFmpeg result aggregation server# TOC
-
-
To submit your results to the server you should run fate through the
-shell script tests/fate.sh from the FFmpeg sources. This script needs
-to be invoked with a configuration file as its first argument.
-
-
-
tests/fate.sh /path/to/fate_config
-
-
-
A configuration file template with comments describing the individual
-configuration variables can be found at doc/fate_config.sh.template .
-
-
The mentioned configuration template is also available here:
-
slot= # some unique identifier
-repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
-samples= # path to samples directory
-workdir= # directory in which to do all the work
-#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
-comment= # optional description
-build_only= # set to "yes" for a compile-only instance that skips tests
-
-# the following are optional and map to configure options
-arch=
-cpu=
-cross_prefix=
-as=
-cc=
-ld=
-target_os=
-sysroot=
-target_exec=
-target_path=
-target_samples=
-extra_cflags=
-extra_ldflags=
-extra_libs=
-extra_conf= # extra configure options not covered above
-
-#make= # name of GNU make if not 'make'
-makeopts= # extra options passed to 'make'
-#tar= # command to create a tar archive from its arguments on stdout,
- # defaults to 'tar c'
-
-
Create a configuration that suits your needs, based on the configuration
-template. The ‘slot’ configuration variable can be any string that is not
-yet used, but it is suggested that you name it adhering to the following
-pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
-itself will be sourced in a shell script, therefore all shell features may
-be used. This enables you to setup the environment as you need it for your
-build.
-
-
For your first test runs the ‘fate_recv’ variable should be empty or
-commented out. This will run everything as normal except that it will omit
-the submission of the results to the server. The following files should be
-present in $workdir as specified in the configuration file:
-
-
- configure.log
- compile.log
- test.log
- report
- version
-
-
-
When you have everything working properly you can create an SSH key pair
-and send the public key to the FATE server administrator who can be contacted
-at the email address fate-admin@ffmpeg.org .
-
-
Configure your SSH client to use public key authentication with that key
-when connecting to the FATE server. Also do not forget to check the identity
-of the server and to accept its host key. This can usually be achieved by
-running your SSH client manually and killing it after you accepted the key.
-The FATE server’s fingerprint is:
-
-
-RSA
-d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
-
-ECDSA
-76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
-
-
-
-
If you have problems connecting to the FATE server, it may help to try out
-the ssh
command with one or more -v options. You should
-get detailed output concerning your SSH configuration and the authentication
-process.
-
-
The only thing left is to automate the execution of the fate.sh script and
-the synchronisation of the samples directory.
-
-
-
-
4 FATE makefile targets and variables# TOC
-
-
-
4.1 Makefile targets# TOC
-
-
-fate-rsync
-Download/synchronize sample files to the configured samples directory.
-
-
-fate-list
-Will list all fate/regression test targets.
-
-
-fate
-Run the FATE test suite (requires the fate-suite dataset).
-
-
-
-
-
4.2 Makefile variables# TOC
-
-
-V
-Verbosity level, can be set to 0, 1 or 2.
-
- 0: show just the test arguments
- 1: show just the command used in the test
- 2: show everything
-
-
-
-SAMPLES
-Specify or override the path to the FATE samples at make time, it has a
-meaning only while running the regression tests.
-
-
-THREADS
-Specify how many threads to use while running regression tests, it is
-quite useful to detect thread-related regressions.
-
-
-THREAD_TYPE
-Specify which threading strategy test, either slice or frame ,
-by default slice+frame
-
-
-CPUFLAGS
-Specify CPU flags.
-
-
-TARGET_EXEC
-Specify or override the wrapper used to run the tests.
-The TARGET_EXEC option provides a way to run FATE wrapped in
-valgrind
, qemu-user
or wine
or on remote targets
-through ssh
.
-
-
-GEN
-Set to 1 to generate the missing or mismatched references.
-
-
-
-
-
4.3 Examples# TOC
-
-
-
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-all.html b/Externals/ffmpeg/shared/doc/ffmpeg-all.html
deleted file mode 100644
index dc4fc35617..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-all.html
+++ /dev/null
@@ -1,27303 +0,0 @@
-
-
-
-
-
-
- ffmpeg Documentation
-
-
-
-
-
-
-
-
- ffmpeg Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffmpeg [global_options ] {[input_file_options ] -i input_file } ... {[output_file_options ] output_file } ...
-
-
-
2 Description# TOC
-
-
ffmpeg
is a very fast video and audio converter that can also grab from
-a live audio/video source. It can also convert between arbitrary sample
-rates and resize video on the fly with a high quality polyphase filter.
-
-
ffmpeg
reads from an arbitrary number of input "files" (which can be regular
-files, pipes, network streams, grabbing devices, etc.), specified by the
--i
option, and writes to an arbitrary number of output "files", which are
-specified by a plain output filename. Anything found on the command line which
-cannot be interpreted as an option is considered to be an output filename.
-
-
Each input or output file can, in principle, contain any number of streams of
-different types (video/audio/subtitle/attachment/data). The allowed number and/or
-types of streams may be limited by the container format. Selecting which
-streams from which inputs will go into which output is either done automatically
-or with the -map
option (see the Stream selection chapter).
-
-
To refer to input files in options, you must use their indices (0-based). E.g.
-the first input file is 0
, the second is 1
, etc. Similarly, streams
-within a file are referred to by their indices. E.g. 2:3
refers to the
-fourth stream in the third input file. Also see the Stream specifiers chapter.
-
-
As a general rule, options are applied to the next specified
-file. Therefore, order is important, and you can have the same
-option on the command line multiple times. Each occurrence is
-then applied to the next input or output file.
-Exceptions from this rule are the global options (e.g. verbosity level),
-which should be specified first.
-
-
Do not mix input and output files – first specify all input files, then all
-output files. Also do not mix options which belong to different files. All
-options apply ONLY to the next input or output file and are reset between files.
-
-
- To set the video bitrate of the output file to 64 kbit/s:
-
-
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
-
-
- To force the frame rate of the output file to 24 fps:
-
-
ffmpeg -i input.avi -r 24 output.avi
-
-
- To force the frame rate of the input file (valid for raw formats only)
-to 1 fps and the frame rate of the output file to 24 fps:
-
-
ffmpeg -r 1 -i input.m2v -r 24 output.avi
-
-
-
-
The format option may be needed for raw input files.
-
-
-
-
3 Detailed description# TOC
-
-
The transcoding process in ffmpeg
for each output can be described by
-the following diagram:
-
-
-
_______ ______________
-| | | |
-| input | demuxer | encoded data | decoder
-| file | ---------> | packets | -----+
-|_______| |______________| |
- v
- _________
- | |
- | decoded |
- | frames |
- |_________|
- ________ ______________ |
-| | | | |
-| output | <-------- | encoded data | <----+
-| file | muxer | packets | encoder
-|________| |______________|
-
-
-
-
-
ffmpeg
calls the libavformat library (containing demuxers) to read
-input files and get packets containing encoded data from them. When there are
-multiple input files, ffmpeg
tries to keep them synchronized by
-tracking lowest timestamp on any active input stream.
-
-
Encoded packets are then passed to the decoder (unless streamcopy is selected
-for the stream, see further for a description). The decoder produces
-uncompressed frames (raw video/PCM audio/...) which can be processed further by
-filtering (see next section). After filtering, the frames are passed to the
-encoder, which encodes them and outputs encoded packets. Finally those are
-passed to the muxer, which writes the encoded packets to the output file.
-
-
-
3.1 Filtering# TOC
-
Before encoding, ffmpeg
can process raw audio and video frames using
-filters from the libavfilter library. Several chained filters form a filter
-graph. ffmpeg
distinguishes between two types of filtergraphs:
-simple and complex.
-
-
-
3.1.1 Simple filtergraphs# TOC
-
Simple filtergraphs are those that have exactly one input and output, both of
-the same type. In the above diagram they can be represented by simply inserting
-an additional step between decoding and encoding:
-
-
-
_________ ______________
-| | | |
-| decoded | | encoded data |
-| frames |\ _ | packets |
-|_________| \ /||______________|
- \ __________ /
- simple _\|| | / encoder
- filtergraph | filtered |/
- | frames |
- |__________|
-
-
-
-
Simple filtergraphs are configured with the per-stream -filter option
-(with -vf and -af aliases for video and audio respectively).
-A simple filtergraph for video can look for example like this:
-
-
-
_______ _____________ _______ ________
-| | | | | | | |
-| input | ---> | deinterlace | ---> | scale | ---> | output |
-|_______| |_____________| |_______| |________|
-
-
-
-
Note that some filters change frame properties but not frame contents. E.g. the
-fps
filter in the example above changes number of frames, but does not
-touch the frame contents. Another example is the setpts
filter, which
-only sets timestamps and otherwise passes the frames unchanged.
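-
For example, the deinterlace-then-scale chain from the diagram above could be
-written as follows (filter choice and file names are only an illustration):
-
ffmpeg -i input.mp4 -vf "yadif,scale=640:360" output.mp4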
-
-
-
3.1.2 Complex filtergraphs# TOC
-
Complex filtergraphs are those which cannot be described as simply a linear
-processing chain applied to one stream. This is the case, for example, when the graph has
-more than one input and/or output, or when output stream type is different from
-input. They can be represented with the following diagram:
-
-
-
_________
-| |
-| input 0 |\ __________
-|_________| \ | |
- \ _________ /| output 0 |
- \ | | / |__________|
- _________ \| complex | /
-| | | |/
-| input 1 |---->| filter |\
-|_________| | | \ __________
- /| graph | \ | |
- / | | \| output 1 |
- _________ / |_________| |__________|
-| | /
-| input 2 |/
-|_________|
-
-
-
-
Complex filtergraphs are configured with the -filter_complex option.
-Note that this option is global, since a complex filtergraph, by its nature,
-cannot be unambiguously associated with a single stream or file.
-
-
The -lavfi option is equivalent to -filter_complex .
-
-
A trivial example of a complex filtergraph is the overlay
filter, which
-has two video inputs and one video output, containing one video overlaid on top
-of the other. Its audio counterpart is the amix
filter.
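-
A sketch of such a graph on the command line (file names and the overlay
-position are placeholders):
-
ffmpeg -i main.mp4 -i logo.png -filter_complex "overlay=10:10" output.mp4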
-
-
-
3.2 Stream copy# TOC
-
Stream copy is a mode selected by supplying the copy
parameter to the
--codec option. It makes ffmpeg
omit the decoding and encoding
-step for the specified stream, so it does only demuxing and muxing. It is useful
-for changing the container format or modifying container-level metadata. The
-diagram above will, in this case, simplify to this:
-
-
-
_______ ______________ ________
-| | | | | |
-| input | demuxer | encoded data | muxer | output |
-| file | ---------> | packets | -------> | file |
-|_______| |______________| |________|
-
-
-
-
Since there is no decoding or encoding, it is very fast and there is no quality
-loss. However, it might not work in some cases because of many factors. Applying
-filters is obviously also impossible, since filters work on uncompressed data.
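-
For example, changing the container without re-encoding (assuming the codecs
-are allowed in the target format):
-
ffmpeg -i input.mkv -c copy output.mp4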
-
-
-
-
4 Stream selection# TOC
-
-
By default, ffmpeg
includes only one stream of each type (video, audio, subtitle)
-present in the input files and adds them to each output file. It picks the
-"best" of each based upon the following criteria: for video, it is the stream
-with the highest resolution, for audio, it is the stream with the most channels, for
-subtitles, it is the first subtitle stream. In the case where several streams of
-the same type rate equally, the stream with the lowest index is chosen.
-
-
You can disable some of those defaults by using the -vn/-an/-sn
options. For
-full manual control, use the -map
option, which disables the defaults just
-described.
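-
For example, to override the defaults and keep the first video stream together
-with the second audio stream of the first input (indices are illustrative):
-
ffmpeg -i input.mkv -map 0:v:0 -map 0:a:1 -c copy output.mkv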
-
-
-
-
5 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
5.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
-
5.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
5.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
5.4 Main options# TOC
-
-
--f fmt (input/output )
-Force input or output file format. The format is normally auto detected for input
-files and guessed from the file extension for output files, so this option is not
-needed in most cases.
-
-
--i filename (input )
-input file name
-
-
--y (global )
-Overwrite output files without asking.
-
-
--n (global )
-Do not overwrite output files, and exit immediately if a specified
-output file already exists.
-
-
--c[:stream_specifier ] codec (input/output,per-stream )
--codec[:stream_specifier ] codec (input/output,per-stream )
-Select an encoder (when used before an output file) or a decoder (when used
-before an input file) for one or more streams. codec is the name of a
-decoder/encoder or a special value copy
(output only) to indicate that
-the stream is not to be re-encoded.
-
-For example
-
-
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
-
-encodes all video streams with libx264 and copies all audio streams.
-
-For each stream, the last matching c
option is applied, so
-
-
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
-
-will copy all the streams except the second video, which will be encoded with
-libx264, and the 138th audio, which will be encoded with libvorbis.
-
-
--t duration (input/output )
-When used as an input option (before -i
), limit the duration of
-data read from the input file.
-
-When used as an output option (before an output filename), stop writing the
-output after its duration reaches duration .
-
-duration may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--to position (output )
-Stop writing the output at position .
-position may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--fs limit_size (output )
-Set the file size limit, expressed in bytes.
-
-
--ss position (input/output )
-When used as an input option (before -i
), seeks in this input file to
-position . Note that in most formats it is not possible to seek exactly, so
-ffmpeg
will seek to the closest seek point before position .
-When transcoding and -accurate_seek is enabled (the default), this
-extra segment between the seek point and position will be decoded and
-discarded. When doing stream copy or when -noaccurate_seek is used, it
-will be preserved.
-
-When used as an output option (before an output filename), decodes but discards
-input until the timestamps reach position .
-
-position may be either in seconds or in hh:mm:ss[.xxx]
form.
-
-
--itsoffset offset (input )
-Set the input time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added to the timestamps of the input files. Specifying
-a positive offset means that the corresponding streams are delayed by
-the time duration specified in offset .
-
-
--timestamp date (output )
-Set the recording timestamp in the container.
-
-date must be a time duration specification,
-see (ffmpeg-utils)the Date section in the ffmpeg-utils(1) manual .
-
-
--metadata[:metadata_specifier] key =value (output,per-metadata )
-Set a metadata key/value pair.
-
-An optional metadata_specifier may be given to set metadata
-on streams or chapters. See -map_metadata
documentation for
-details.
-
-This option overrides metadata set with -map_metadata
. It is
-also possible to delete metadata by using an empty value.
-
-For example, for setting the title in the output file:
-
-
ffmpeg -i in.avi -metadata title="my title" out.flv
-
-
-To set the language of the first audio stream:
-
-
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
-
-
-
--target type (output )
-Specify target file type (vcd
, svcd
, dvd
, dv
,
-dv50
). type may be prefixed with pal-
, ntsc-
or
-film-
to use the corresponding standard. All the format options
-(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
-
-
-
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
-
-
-Nevertheless you can specify additional options as long as you know
-they do not conflict with the standard, as in:
-
-
-
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
-
-
-
--dframes number (output )
-Set the number of data frames to output. This is an alias for -frames:d
.
-
-
--frames[:stream_specifier ] framecount (output,per-stream )
-Stop writing to the stream after framecount frames.
-
-
--q[:stream_specifier ] q (output,per-stream )
--qscale[:stream_specifier ] q (output,per-stream )
-Use fixed quality scale (VBR). The meaning of q /qscale is
-codec-dependent.
-If qscale is used without a stream_specifier then it applies only
-to the video stream. This is to maintain compatibility with previous
-behavior, and because specifying the same codec-specific value for two
-different codecs (audio and video) is generally not what is intended
-when no stream_specifier is used.
-
-
--filter[:stream_specifier ] filtergraph (output,per-stream )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single input and a single output of the
-same type of the stream. In the filtergraph, the input is associated
-to the label in
, and the output to the label out
. See
-the ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-See the -filter_complex option if you
-want to create filtergraphs with multiple inputs and/or outputs.
-
-
--filter_script[:stream_specifier ] filename (output,per-stream )
-This option is similar to -filter , the only difference is that its
-argument is the name of the file from which a filtergraph description is to be
-read.
-
-
--pre[:stream_specifier ] preset_name (output,per-stream )
-Specify the preset for matching stream(s).
-
-
--stats (global )
-Print encoding progress/statistics. It is on by default, to explicitly
-disable it you need to specify -nostats
.
-
-
--progress url (global )
-Send program-friendly progress information to url .
-
-Progress information is written approximately every second and at the end of
-the encoding process. It is made of "key =value " lines. key
-consists of only alphanumeric characters. The last key of a sequence of
-progress information is always "progress".
-
-
--stdin
-Enable interaction on standard input. On by default unless standard input is
-used as an input. To explicitly disable interaction you need to specify
--nostdin
.
-
-Disabling interaction on standard input is useful, for example, if
-ffmpeg is in the background process group. Roughly the same result can
-be achieved with ffmpeg ... < /dev/null
but it requires a
-shell.
-
-
--debug_ts (global )
-Print timestamp information. It is off by default. This option is
-mostly useful for testing and debugging purposes, and the output
-format may change from one version to another, so it should not be
-employed by portable scripts.
-
-See also the option -fdebug ts
.
-
-
--attach filename (output )
-Add an attachment to the output file. This is supported by a few formats
-like Matroska for e.g. fonts used in rendering subtitles. Attachments
-are implemented as a specific type of stream, so this option will add
-a new stream to the file. It is then possible to use per-stream options
-on this stream in the usual way. Attachment streams created with this
-option will be created after all the other streams (i.e. those created
-with -map
or automatic mappings).
-
-Note that for Matroska you also have to set the mimetype metadata tag:
-
-
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
-
-(assuming that the attachment stream will be third in the output file).
-
-
--dump_attachment[:stream_specifier ] filename (input,per-stream )
-Extract the matching attachment stream into a file named filename . If
-filename is empty, then the value of the filename metadata tag
-will be used.
-
-E.g. to extract the first attachment to a file named ’out.ttf’:
-
-
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
-
-To extract all attachments to files determined by the filename tag:
-
-
ffmpeg -dump_attachment:t "" -i INPUT
-
-
-Technical note – attachments are implemented as codec extradata, so this
-option can actually be used to extract extradata from any stream, not just
-attachments.
-
-
-
-
-
-
5.5 Video Options# TOC
-
-
--vframes number (output )
-Set the number of video frames to output. This is an alias for -frames:v.
-
--r[:stream_specifier ] fps (input/output,per-stream )
-Set frame rate (Hz value, fraction or abbreviation).
-
-As an input option, ignore any timestamps stored in the file and instead
-generate timestamps assuming constant frame rate fps .
-This is not the same as the -framerate option used for some input formats
-like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
-If in doubt use -framerate instead of the input option -r .
-
-As an output option, duplicate or drop input frames to achieve constant output
-frame rate fps .
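-For example, a sketch (untested; file names are placeholders) that re-encodes
-the input to a constant 25 frames per second output:
-
ffmpeg -i input.mp4 -r 25 output.mp4
-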
-
-
--s[:stream_specifier ] size (input/output,per-stream )
-Set frame size.
-
-As an input option, this is a shortcut for the video_size private
-option, recognized by some demuxers for which the frame size is either not
-stored in the file or is configurable – e.g. raw video or video grabbers.
-
-As an output option, this inserts the scale video filter at the
-end of the corresponding filtergraph. Please use the scale filter
-directly to insert it at the beginning or some other place.
-
-The format is ‘wxh ’ (default - same as source).
-
-
--aspect[:stream_specifier ] aspect (output,per-stream )
-Set the video display aspect ratio specified by aspect .
-
-aspect can be a floating point number string, or a string of the
-form num :den , where num and den are the
-numerator and denominator of the aspect ratio. For example "4:3",
-"16:9", "1.3333", and "1.7777" are valid argument values.
-
-If used together with -vcodec copy , it will affect the aspect ratio
-stored at container level, but not the aspect ratio stored in encoded
-frames, if it exists.
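-For example, a possible sketch (untested; file names are placeholders) that rewrites
-only the container-level display aspect ratio while stream copying:
-
ffmpeg -i input.mp4 -c copy -aspect 16:9 output.mp4
-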
-
-
--vn (output )
-Disable video recording.
-
-
--vcodec codec (output )
-Set the video codec. This is an alias for -codec:v.
-
-
--pass[:stream_specifier ] n (output,per-stream )
-Select the pass number (1 or 2). It is used to do two-pass
-video encoding. The statistics of the video are recorded in the first
-pass into a log file (see also the option -passlogfile),
-and in the second pass that log file is used to generate the video
-at the exact requested bitrate.
-On pass 1, you may just deactivate audio and set output to null,
-examples for Windows and Unix:
-
-
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
-ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
-
-
-
--passlogfile[:stream_specifier ] prefix (output,per-stream )
-Set the two-pass log file name prefix to prefix ; the default file name
-prefix is “ffmpeg2pass”. The complete file name will be
-PREFIX-N.log , where N is a number specific to the output
-stream.
-
-
--vf filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:v; see the -filter option.
-
-
-
-
-
5.6 Advanced Video options# TOC
-
-
--pix_fmt[:stream_specifier ] format (input/output,per-stream )
-Set pixel format. Use -pix_fmts to show all the supported
-pixel formats.
-If the selected pixel format cannot be used, ffmpeg will print a
-warning and select the best pixel format supported by the encoder.
-If pix_fmt is prefixed by a +, ffmpeg will exit with an error
-if the requested pixel format cannot be selected, and automatic conversions
-inside filtergraphs are disabled.
-If pix_fmt is a single +, ffmpeg selects the same pixel format
-as the input (or graph output) and automatic conversions are disabled.
-
-
--sws_flags flags (input/output )
-Set SwScaler flags.
-
--vdt n
-Discard threshold.
-
-
--rc_override[:stream_specifier ] override (output,per-stream )
-Rate control override for specific intervals, formatted as "int,int,int"
-list separated with slashes. Two first values are the beginning and
-end frame numbers, last one is quantizer to use if positive, or quality
-factor if negative.
-
-
--ilme
-Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
-Use this option if your input file is interlaced and you want
-to keep the interlaced format for minimum losses.
-The alternative is to deinterlace the input stream with
--deinterlace , but deinterlacing introduces losses.
-
--psnr
-Calculate PSNR of compressed frames.
-
--vstats
-Dump video coding statistics to vstats_HHMMSS.log .
-
--vstats_file file
-Dump video coding statistics to file .
-
--top[:stream_specifier ] n (output,per-stream )
-Set the field order: 1 means top field first, 0 bottom field first, -1 automatic detection.
-
--dc precision
-Set the intra DC precision.
-
--vtag fourcc/tag (output )
-Force video tag/fourcc. This is an alias for -tag:v.
-
--qphist (global )
-Show QP histogram.
-
--vbsf bitstream_filter
-Deprecated, see -bsf.
-
-
--force_key_frames[:stream_specifier ] time [,time ...] (output,per-stream )
--force_key_frames[:stream_specifier ] expr:expr (output,per-stream )
-Force key frames at the specified timestamps, more precisely at the first
-frames after each specified time.
-
-If the argument is prefixed with expr:, the string expr
-is interpreted as an expression and is evaluated for each frame. A
-key frame is forced if the evaluation is non-zero.
-
-If one of the times is "chapters[delta ]", it is expanded into
-the time of the beginning of all chapters in the file, shifted by
-delta , expressed as a time in seconds.
-This option can be useful to ensure that a seek point is present at a
-chapter mark or any other designated place in the output file.
-
-For example, to insert a key frame at 5 minutes, plus key frames 0.1 second
-before the beginning of every chapter:
-
-
-force_key_frames 0:05:00,chapters-0.1
-
-
-The expression in expr can contain the following constants:
-
-n
-the number of the currently processed frame, starting from 0
-
-n_forced
-the number of forced frames
-
-prev_forced_n
-the number of the previous forced frame; it is NAN when no
-keyframe has been forced yet
-
-prev_forced_t
-the time of the previous forced frame; it is NAN when no
-keyframe has been forced yet
-
-t
-the time of the currently processed frame
-
-
-
-For example to force a key frame every 5 seconds, you can specify:
-
-
-force_key_frames expr:gte(t,n_forced*5)
-
-
-To force a key frame 5 seconds after the time of the last forced one,
-starting from second 13:
-
-
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
-
-
-Note that forcing too many keyframes is very harmful for the lookahead
-algorithms of certain encoders: using fixed-GOP options or similar
-would be more efficient.
-
-
--copyinkf[:stream_specifier ] (output,per-stream )
-When doing stream copy, copy also non-key frames found at the
-beginning.
-
-
--hwaccel[:stream_specifier ] hwaccel (input,per-stream )
-Use hardware acceleration to decode the matching stream(s). The allowed values
-of hwaccel are:
-
-none
-Do not use any hardware acceleration (the default).
-
-
-auto
-Automatically select the hardware acceleration method.
-
-
-vda
-Use Apple VDA hardware acceleration.
-
-
-vdpau
-Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
-
-
-dxva2
-Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
-
-
-
-This option has no effect if the selected hwaccel is not available or not
-supported by the chosen decoder.
-
-Note that most acceleration methods are intended for playback and will not be
-faster than software decoding on modern CPUs. Additionally, ffmpeg
-will usually need to copy the decoded frames from the GPU memory into the system
-memory, resulting in further performance loss. This option is thus mainly
-useful for testing.
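-For example, a sketch for testing hardware decoding (untested; assumes a VDPAU-capable
-system) that decodes the input and discards the result via the null muxer:
-
ffmpeg -hwaccel vdpau -i input.mp4 -f null -
-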
-
-
--hwaccel_device[:stream_specifier ] hwaccel_device (input,per-stream )
-Select a device to use for hardware acceleration.
-
-This option only makes sense when the -hwaccel option is also
-specified. Its exact meaning depends on the specific hardware acceleration
-method chosen.
-
-
-vdpau
-For VDPAU, this option specifies the X11 display/screen to use. If this option
-is not specified, the value of the DISPLAY environment variable is used
-
-
-dxva2
-For DXVA2, this option should contain the number of the display adapter to use.
-If this option is not specified, the default adapter is used.
-
-
-
-
-
-
-
5.7 Audio Options# TOC
-
-
--aframes number (output )
-Set the number of audio frames to output. This is an alias for -frames:a.
-
--ar[:stream_specifier ] freq (input/output,per-stream )
-Set the audio sampling frequency. For output streams it is set by
-default to the frequency of the corresponding input stream. For input
-streams this option only makes sense for audio grabbing devices and raw
-demuxers and is mapped to the corresponding demuxer options.
-
--aq q (output )
-Set the audio quality (codec-specific, VBR). This is an alias for -q:a.
-
--ac[:stream_specifier ] channels (input/output,per-stream )
-Set the number of audio channels. For output streams it is set by
-default to the number of input audio channels. For input streams
-this option only makes sense for audio grabbing devices and raw demuxers
-and is mapped to the corresponding demuxer options.
-
--an (output )
-Disable audio recording.
-
--acodec codec (input/output )
-Set the audio codec. This is an alias for -codec:a.
-
--sample_fmt[:stream_specifier ] sample_fmt (output,per-stream )
-Set the audio sample format. Use -sample_fmts to get a list
-of supported sample formats.
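-For example, a sketch (untested; file names are placeholders) that resamples the
-audio to 44100 Hz stereo with signed 16-bit samples:
-
ffmpeg -i input.flac -ar 44100 -ac 2 -sample_fmt s16 output.wav
-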
-
-
--af filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:a, see the -filter option.
-
-
-
-
-
5.8 Advanced Audio options# TOC
-
-
--atag fourcc/tag (output )
-Force audio tag/fourcc. This is an alias for -tag:a.
-
--absf bitstream_filter
-Deprecated, see -bsf.
-
--guess_layout_max channels (input,per-stream )
-If some input channel layout is not known, try to guess only if it
-corresponds to at most the specified number of channels. For example, 2
-tells ffmpeg to recognize 1 channel as mono and 2 channels as
-stereo but not 6 channels as 5.1. The default is to always try to guess. Use
-0 to disable all guessing.
-
-
-
-
-
5.9 Subtitle options# TOC
-
-
--scodec codec (input/output )
-Set the subtitle codec. This is an alias for -codec:s.
-
--sn (output )
-Disable subtitle recording.
-
--sbsf bitstream_filter
-Deprecated, see -bsf.
-
-
-
-
-
5.10 Advanced Subtitle options# TOC
-
-
--fix_sub_duration
-Fix subtitles durations. For each subtitle, wait for the next packet in the
-same stream and adjust the duration of the first to avoid overlap. This is
-necessary with some subtitle codecs, especially DVB subtitles, because the
-duration in the original packet is only a rough estimate and the end is
-actually marked by an empty subtitle frame. Failing to use this option when
-necessary can result in exaggerated durations or muxing failures due to
-non-monotonic timestamps.
-
-Note that this option will delay the output of all data until the next
-subtitle packet is decoded: it may increase memory consumption and latency a
-lot.
-
-
--canvas_size size
-Set the size of the canvas used to render subtitles.
-
-
-
-
-
-
5.11 Advanced options# TOC
-
-
--map [-]input_file_id [:stream_specifier ][,sync_file_id [:stream_specifier ]] | [linklabel] (output )
-
-Designate one or more input streams as a source for the output file. Each input
-stream is identified by the input file index input_file_id and
-the input stream index input_stream_id within the input
-file. Both indices start at 0. If specified,
-sync_file_id :stream_specifier sets which input stream
-is used as a presentation sync reference.
-
-The first -map option on the command line specifies the
-source for output stream 0, the second -map option specifies
-the source for output stream 1, etc.
-
-A - character before the stream identifier creates a "negative" mapping.
-It disables matching streams from already created mappings.
-
-An alternative [linklabel] form will map outputs from complex filter
-graphs (see the -filter_complex option) to the output file.
-linklabel must correspond to a defined output link label in the graph.
-
-For example, to map ALL streams from the first input file to output
-
-
ffmpeg -i INPUT -map 0 output
-
-
-For example, if you have two audio streams in the first input file,
-these streams are identified by "0:0" and "0:1". You can use
--map to select which streams to place in an output file. For
-example:
-
-
ffmpeg -i INPUT -map 0:1 out.wav
-
-will map the input stream in INPUT identified by "0:1" to
-the (single) output stream in out.wav .
-
-For example, to select the stream with index 2 from input file
-a.mov (specified by the identifier "0:2"), and stream with
-index 6 from input b.mov (specified by the identifier "1:6"),
-and copy them to the output file out.mov :
-
-
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
-
-
-To select all video and the third audio stream from an input file:
-
-
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
-
-
-To map all the streams except the second audio, use negative mappings
-
-
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
-
-
-To pick the English audio stream:
-
-
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
-
-
-Note that using this option disables the default mappings for this output file.
-
-
--map_channel [input_file_id .stream_specifier .channel_id |-1][:output_file_id .stream_specifier ]
-Map an audio channel from a given input to an output. If
-output_file_id .stream_specifier is not set, the audio channel will
-be mapped on all the audio streams.
-
-Using "-1" instead of
-input_file_id .stream_specifier .channel_id will map a muted
-channel.
-
-For example, assuming INPUT is a stereo audio file, you can switch the
-two audio channels with the following command:
-
-
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
-
-
-If you want to mute the first channel and keep the second:
-
-
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
-
-
-The order of the "-map_channel" options specifies the order of the channels in
-the output stream. The output channel layout is guessed from the number of
-channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
-in combination with "-map_channel" makes the channel gain levels be updated if
-the input and output channel layouts don’t match (for instance two "-map_channel"
-options and "-ac 6").
-
-You can also extract each channel of an input to specific outputs; the following
-command extracts two channels of the INPUT audio stream (file 0, stream 0)
-to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs:
-
-
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
-
-
-The following example splits the channels of a stereo input into two separate
-streams, which are put into the same output file:
-
-
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
-
-
-Note that currently each output stream can only contain channels from a single
-input stream; you can’t for example use "-map_channel" to pick multiple input
-audio channels contained in different streams (from the same or different files)
-and merge them into a single output stream. It is therefore not currently
-possible, for example, to turn two separate mono streams into a single stereo
-stream. However splitting a stereo stream into two single channel mono streams
-is possible.
-
-If you need this feature, a possible workaround is to use the amerge
-filter. For example, if you need to merge a media (here input.mkv ) with 2
-mono audio streams into one single stereo channel audio stream (and keep the
-video stream), you can use the following command:
-
-
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
-
-
-
--map_metadata[:metadata_spec_out ] infile [:metadata_spec_in ] (output,per-metadata )
-Set metadata information of the next output file from infile . Note that
-those are file indices (zero-based), not filenames.
-Optional metadata_spec_in/out parameters specify which metadata to copy.
-A metadata specifier can have the following forms:
-
-g
-global metadata, i.e. metadata that applies to the whole file
-
-
-s [:stream_spec ]
-per-stream metadata. stream_spec is a stream specifier as described
-in the Stream specifiers chapter. In an input metadata specifier, the first
-matching stream is copied from. In an output metadata specifier, all matching
-streams are copied to.
-
-
-c :chapter_index
-per-chapter metadata. chapter_index is the zero-based chapter index.
-
-
-p :program_index
-per-program metadata. program_index is the zero-based program index.
-
-
-If metadata specifier is omitted, it defaults to global.
-
-By default, global metadata is copied from the first input file,
-per-stream and per-chapter metadata is copied along with streams/chapters. These
-default mappings are disabled by creating any mapping of the relevant type. A negative
-file index can be used to create a dummy mapping that just disables automatic copying.
-
-For example to copy metadata from the first stream of the input file to global metadata
-of the output file:
-
-
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
-
-
-To do the reverse, i.e. copy global metadata to all audio streams:
-
-
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
-
-Note that simple 0 would work as well in this example, since global
-metadata is assumed by default.
-
-
--map_chapters input_file_index (output )
-Copy chapters from input file with index input_file_index to the next
-output file. If no chapter mapping is specified, then chapters are copied from
-the first input file with at least one chapter. Use a negative file index to
-disable any chapter copying.
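-For example, a sketch (untested; file names are placeholders) that copies the
-streams from the second input while taking the chapters from the first:
-
ffmpeg -i chapters.mkv -i video.mkv -map 1 -map_chapters 0 -c copy out.mkv
-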
-
-
--benchmark (global )
-Show benchmarking information at the end of an encode.
-Shows CPU time used and maximum memory consumption.
-Maximum memory consumption is not supported on all systems,
-it will usually display as 0 if not supported.
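-For example, a sketch (untested) that measures decoding speed by discarding the
-output via the null muxer:
-
ffmpeg -benchmark -i input.mp4 -f null -
-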
-
--benchmark_all (global )
-Show benchmarking information during the encode.
-Shows CPU time used in various steps (audio/video encode/decode).
-
--timelimit duration (global )
-Exit after ffmpeg has been running for duration seconds.
-
--dump (global )
-Dump each input packet to stderr.
-
--hex (global )
-When dumping packets, also dump the payload.
-
--re (input )
-Read input at native frame rate. Mainly used to simulate a grab device
-or live input stream (e.g. when reading from a file). Should not be used
-with actual grab devices or live input streams (where it can cause packet
-loss).
-By default ffmpeg attempts to read the input(s) as fast as possible.
-This option will slow down the reading of the input(s) to the native frame rate
-of the input(s). It is useful for real-time output (e.g. live streaming).
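-For example, a sketch for live streaming (untested; the address is a placeholder)
-that reads the file at its native frame rate and sends it over UDP as MPEG-TS:
-
ffmpeg -re -i input.mp4 -c copy -bsf:v h264_mp4toannexb -f mpegts udp://127.0.0.1:1234
-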
-
--loop_input
-Loop over the input stream. Currently it works only for image
-streams. This option is used for automatic FFserver testing.
-This option is deprecated, use -loop 1.
-
--loop_output number_of_times
-Repeatedly loop output for formats that support looping such as animated GIF
-(0 will loop the output infinitely).
-This option is deprecated, use -loop.
-
--vsync parameter
-Video sync method.
-For compatibility reasons old values can be specified as numbers.
-Newly added values must always be specified as strings.
-
-
-0, passthrough
-Each frame is passed with its timestamp from the demuxer to the muxer.
-
-1, cfr
-Frames will be duplicated and dropped to achieve exactly the requested
-constant frame rate.
-
-2, vfr
-Frames are passed through with their timestamp or dropped so as to
-prevent 2 frames from having the same timestamp.
-
-drop
-As passthrough but destroys all timestamps, making the muxer generate
-fresh timestamps based on frame-rate.
-
--1, auto
-Chooses between 1 and 2 depending on muxer capabilities. This is the
-default method.
-
-
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-With -map you can select from which stream the timestamps should be
-taken. You can leave either video or audio unchanged and sync the
-remaining stream(s) to the unchanged one.
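-For example, a sketch (untested; file names are placeholders) that forces a
-constant 30 fps output by duplicating or dropping frames as needed:
-
ffmpeg -i input.mp4 -vsync cfr -r 30 output.mp4
-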
-
-
--async samples_per_second
-Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
-the parameter is the maximum samples per second by which the audio is changed.
--async 1 is a special case where only the start of the audio stream is corrected
-without any later correction.
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-This option has been deprecated. Use the aresample audio filter instead.
-
-
--copyts
-Do not process input timestamps, but keep their values without trying
-to sanitize them. In particular, do not remove the initial start time
-offset value.
-
-Note that, depending on the vsync option or on specific muxer
-processing (e.g. in case the format option avoid_negative_ts
-is enabled) the output timestamps may mismatch with the input
-timestamps even when this option is selected.
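-For example, a sketch (untested; file names are placeholders) that stream copies
-an MPEG-TS file while preserving the original timestamps:
-
ffmpeg -i input.ts -c copy -copyts -muxdelay 0 output.ts
-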
-
-
--start_at_zero
-When used with copyts , shift input timestamps so they start at zero.
-
-This means that using e.g. -ss 50 will make output timestamps start at
-50 seconds, regardless of what timestamp the input file started at.
-
-
--copytb mode
-Specify how to set the encoder timebase when stream copying. mode is an
-integer numeric value, and can assume one of the following values:
-
-
-1
-Use the demuxer timebase.
-
-The time base is copied to the output encoder from the corresponding input
-demuxer. This is sometimes required to avoid non-monotonically increasing
-timestamps when copying video streams with variable frame rate.
-
-
-0
-Use the decoder timebase.
-
-The time base is copied to the output encoder from the corresponding input
-decoder.
-
-
--1
-Try to make the choice automatically, in order to generate a sane output.
-
-
-
-Default value is -1.
-
-
--shortest (output )
-Finish encoding when the shortest input stream ends.
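-For example, a sketch (untested; file names are placeholders) that muxes a video
-with a separate audio track and stops when the shorter of the two ends:
-
ffmpeg -i video.mp4 -i audio.wav -map 0:v -map 1:a -c copy -shortest out.mkv
-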
-
--dts_delta_threshold
-Timestamp discontinuity delta threshold.
-
--muxdelay seconds (input )
-Set the maximum demux-decode delay.
-
--muxpreload seconds (input )
-Set the initial demux-decode delay.
-
--streamid output-stream-index :new-value (output )
-Assign a new stream-id value to an output stream. This option should be
-specified prior to the output filename to which it applies.
-For the situation where multiple output files exist, a streamid
-may be reassigned to a different value.
-
-For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
-an output mpegts file:
-
-
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
-
-
-
--bsf[:stream_specifier ] bitstream_filters (output,per-stream )
-Set bitstream filters for matching streams. bitstream_filters is
-a comma-separated list of bitstream filters. Use the -bsfs option
-to get the list of bitstream filters.
-
-
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
-
-
-
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
-
-
-
--tag[:stream_specifier ] codec_tag (input/output,per-stream )
-Force a tag/fourcc for matching streams.
-
-
--timecode hh :mm :ss SEPff
-Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’
-(or ’.’) for drop.
-
-
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
-
-
-
--filter_complex filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. For simple graphs – those with one input and one output of the same
-type – see the -filter options. filtergraph is a description of
-the filtergraph, as described in the “Filtergraph syntax” section of the
-ffmpeg-filters manual.
-
-Input link labels must refer to input streams using the
-[file_index:stream_specifier] syntax (i.e. the same as -map
-uses). If stream_specifier matches multiple streams, the first one will be
-used. An unlabeled input will be connected to the first unused input stream of
-the matching type.
-
-Output link labels are referred to with -map . Unlabeled outputs are
-added to the first output file.
-
-Note that with this option it is possible to use only lavfi sources without
-normal input files.
-
-For example, to overlay an image over video
-
-
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
-'[out]' out.mkv
-
-Here [0:v] refers to the first video stream in the first input file,
-which is linked to the first (main) input of the overlay filter. Similarly the
-first video stream in the second input is linked to the second (overlay) input
-of overlay.
-
-Assuming there is only one video stream in each input file, we can omit input
-labels, so the above is equivalent to
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
-'[out]' out.mkv
-
-
-Furthermore we can omit the output label and the single output from the filter
-graph will be added to the output file automatically, so we can simply write
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
-
-
-To generate 5 seconds of pure red video using the lavfi color source:
-
-
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
-
-
-
--lavfi filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. Equivalent to -filter_complex .
-
-
--filter_complex_script filename (global )
-This option is similar to -filter_complex , the only difference is that
-its argument is the name of the file from which a complex filtergraph
-description is to be read.
-
-
--accurate_seek (input )
-This option enables or disables accurate seeking in input files with the
--ss option. It is enabled by default, so seeking is accurate when
-transcoding. Use -noaccurate_seek to disable it, which may be useful
-e.g. when copying some streams and transcoding the others.
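-For example, a sketch (untested; file names are placeholders) that cuts 30 seconds
-starting near the one minute mark, using fast keyframe-based seeking while stream copying:
-
ffmpeg -ss 00:01:00 -noaccurate_seek -i input.mp4 -t 30 -c copy cut.mp4
-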
-
-
--override_ffserver (global )
-Overrides the input specifications from ffserver. Using this
-option you can map any input stream to ffserver and control
-many aspects of the encoding from ffmpeg. Without this
-option ffmpeg will transmit to ffserver what is
-requested by ffserver.
-
-The option is intended for cases where features are needed that cannot be
-specified to ffserver but can be to ffmpeg.
-
-
--sdp_file file (global )
-Print SDP information to file .
-This allows dumping SDP information when at least one output isn’t an
-RTP stream.
-
-
--discard (input )
-Allows discarding specific streams or frames of streams at the demuxer.
-Not all demuxers support this.
-
-
-none
-Discard no frame.
-
-
-default
-Default, which discards no frames.
-
-
-noref
-Discard all non-reference frames.
-
-
-bidir
-Discard all bidirectional frames.
-
-
-nokey
-Discard all frames except keyframes.
-
-
-all
-Discard all frames.
-
-
-
-
-
-
-
As a special exception, you can use a bitmap subtitle stream as input: it
-will be converted into a video with the same size as the largest video in
-the file, or 720x576 if no video is present. Note that this is an
-experimental and temporary solution. It will be removed once libavfilter has
-proper support for subtitles.
-
-
For example, to hardcode subtitles on top of a DVB-T recording stored in
-MPEG-TS format, delaying the subtitles by 1 second:
-
-
ffmpeg -i input.ts -filter_complex \
- '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
- -sn -map '#0x2dc' output.mkv
-
-
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
-audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
-
-
-
5.12 Preset files# TOC
-
A preset file contains a sequence of option =value pairs,
-one for each line, specifying a sequence of options which would be
-awkward to specify on the command line. Lines starting with the hash
-(’#’) character are ignored and are used to provide comments. Check
-the presets directory in the FFmpeg source tree for examples.
-
-
There are two types of preset files: ffpreset and avpreset files.
-
-
-
5.12.1 ffpreset files# TOC
-
ffpreset files are specified with the vpre, apre,
-spre, and fpre options. The fpre option takes the
-filename of the preset instead of a preset name as input and can be
-used for any kind of codec. For the vpre, apre, and
-spre options, the options specified in a preset file are
-applied to the currently selected codec of the same type as the preset
-option.
-
-
The argument passed to the vpre, apre, and spre
-preset options identifies the preset file to use according to the
-following rules:
-
-
First ffmpeg searches for a file named arg .ffpreset in the
-directories $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg , and in
-the datadir defined at configuration time (usually PREFIX/share/ffmpeg )
-or in a ffpresets folder alongside the executable on win32,
-in that order. For example, if the argument is libvpx-1080p, it will
-search for the file libvpx-1080p.ffpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-codec_name -arg .ffpreset in the above-mentioned
-directories, where codec_name is the name of the codec to which
-the preset file options will be applied. For example, if you select
-the video codec with -vcodec libvpx and use -vpre 1080p,
-then it will search for the file libvpx-1080p.ffpreset .
-
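-For example, a possible invocation (untested; assumes a libvpx-1080p.ffpreset file
-exists in one of the directories listed above):
-
ffmpeg -i input.mov -vcodec libvpx -vpre 1080p -an output.webm
-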
-
-
5.12.2 avpreset files# TOC
-
avpreset files are specified with the pre option. They work similarly to
-ffpreset files, but they only allow encoder-specific options. Therefore, an
-option =value pair specifying an encoder cannot be used.
-
-
When the pre option is specified, ffmpeg will look for files with the
-suffix .avpreset in the directories $AVCONV_DATADIR (if set), and
-$HOME/.avconv , and in the datadir defined at configuration time (usually
-PREFIX/share/ffmpeg ), in that order.
-
-
First ffmpeg searches for a file named codec_name -arg .avpreset in
-the above-mentioned directories, where codec_name is the name of the codec
-to which the preset file options will be applied. For example, if you select the
-video codec with -vcodec libvpx and use -pre 1080p, then it will
-search for the file libvpx-1080p.avpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-arg .avpreset in the same directories.
-
-
-
-
-
-
- For streaming at very low bitrates, use a low frame rate
-and a small GOP size. This is especially true for RealVideo where
-the Linux player does not seem to be very fast, so it can miss
-frames. An example is:
-
-
-
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
-
-
- The parameter ’q’ which is displayed while encoding is the current
-quantizer. The value 1 indicates that a very good quality could
-be achieved. The value 31 indicates the worst quality. If q=31 appears
-too often, it means that the encoder cannot compress enough to meet
-your bitrate. You must either increase the bitrate, decrease the
-frame rate or decrease the frame size.
-
- If your computer is not fast enough, you can speed up the
-compression at the expense of the compression ratio. You can use
-’-me zero’ to speed up motion estimation, and ’-g 0’ to disable
-motion estimation completely (you have only I-frames, which means it
-is about as good as JPEG compression).
-
- To have very low audio bitrates, reduce the sampling frequency
-(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
-
- To have a constant quality (but a variable bitrate), use the option
-’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst
-quality).
-
-
-
-
-
7 Examples# TOC
-
-
-
7.1 Video and Audio grabbing# TOC
-
-
If you specify the input format and device then ffmpeg can grab video
-and audio directly.
-
-
-
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Or with an ALSA audio source (mono input, card id 1) instead of OSS:
-
-
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Note that you must activate the right video source and channel before
-launching ffmpeg with any TV viewer such as
-xawtv by Gerd Knorr. You also
-have to set the audio recording levels correctly with a
-standard mixer.
-
-
-
7.2 X11 grabbing# TOC
-
-
Grab the X11 display with ffmpeg via
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as
-the DISPLAY environment variable.
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
-variable. 10 is the x-offset and 20 the y-offset for the grabbing.
-
-
-
7.3 Video and Audio file format conversion# TOC
-
-
Any supported file format and protocol can serve as input to ffmpeg:
-
-
Examples:
-
- You can use YUV files as input:
-
-
-
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
-
-
-It will use the files:
-
-
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
-/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
-
-
-The Y files use twice the resolution of the U and V files. They are
-raw files, without header. They can be generated by all decent video
-decoders. You must specify the size of the image with the -s option
-if ffmpeg cannot guess it.
-
- You can input from a raw YUV420P file:
-
-
-
ffmpeg -i /tmp/test.yuv /tmp/out.avi
-
-
-test.yuv is a file containing raw YUV planar data. Each frame is composed
-of the Y plane followed by the U and V planes at half vertical and
-horizontal resolution.
-
- You can output to a raw YUV420P file:
-
-
-
ffmpeg -i mydivx.avi hugefile.yuv
-
-
- You can set several input files and output files:
-
-
-
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
-
-
-Converts the audio file a.wav and the raw YUV video file a.yuv
-to MPEG file a.mpg.
-
- You can also do audio and video conversions at the same time:
-
-
-
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
-
-
-Converts a.wav to MPEG audio at 22050 Hz sample rate.
-
- You can encode to several formats at the same time and define a
-mapping from input stream to output streams:
-
-
-
ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
-
-
-Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map
-file:index’ specifies which input stream is used for each output
-stream, in the order of the definition of output streams.
-
- You can transcode decrypted VOBs:
-
-
-
ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
-
-
-This is a typical DVD ripping example; the input is a VOB file, the
-output an AVI file with MPEG-4 video and MP3 audio. Note that in this
-command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
-GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
-input video. Furthermore, the audio stream is MP3-encoded so you need
-to enable LAME support by passing --enable-libmp3lame to configure.
-The mapping is particularly useful for DVD transcoding
-to get the desired audio language.
-
-NOTE: To see the supported input formats, use ffmpeg -formats.
-
- You can extract images from a video, or create a video from many images:
-
-For extracting images from a video:
-
-
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
-
-
-This will extract one video frame per second from the video and will
-output them in files named foo-001.jpeg , foo-002.jpeg ,
-etc. Images will be rescaled to fit the new WxH values.
-
-If you want to extract just a limited number of frames, you can use the
-above command in combination with the -vframes or -t option, or in
-combination with -ss to start extracting from a certain point in time.
-
-For creating a video from many images:
-
-
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
-
-
-The syntax foo-%03d.jpeg specifies to use a decimal number
-composed of three digits padded with zeroes to express the sequence
-number. It is the same syntax supported by the C printf function, but
-only formats accepting a normal integer are suitable.
-
-When importing an image sequence, -i also supports expanding
-shell-like wildcard patterns (globbing) internally, by selecting the
-image2-specific -pattern_type glob option.
-
-For example, to create a video from filenames matching the glob pattern
-foo-*.jpeg :
-
-
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
-
-
- You can put many streams of the same type in the output:
-
-
-
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
-
-
-The resulting output file test12.nut will contain the first four streams
-from the input files in reverse order.
-
- To force CBR video output:
-
-
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
-
-
- The four options lmin, lmax, mblmin and mblmax use ’lambda’ units,
-but you may use the QP2LAMBDA constant to easily convert from ’q’ units:
-
-
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
-
-
-
-
-
-
8 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
8.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- ' and \ are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between '' are included literally in the
-parsed string. The quote character ' itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
8.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour containing the ' special
-character:
-
Crime d\'Amour
-
- The string above contains a quote, so the ' needs to be escaped
-when quoting it:
-
'Crime d'\''Amour'
-
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
8.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
8.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
-
[-][HH:]MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
or
-
[-]S+[.m...]
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
8.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
-
-
8.4 Video size# TOC
-
Specify the size of the sourced video, it may be a string of the form
-width xheight , or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
-
-
8.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
-
-
-
8.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
8.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed by "0x" followed by an
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
-
-
8.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1(back) ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout )
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_* macros in libavutil/channel_layout.h ).
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout defined in
-libavutil/channel_layout.h .
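-For example, a sketch (untested) that uses the lavfi anullsrc source to generate
-one second of silent 5.1 audio with the channel layout given by name:
-
ffmpeg -f lavfi -i anullsrc=channel_layout=5.1:sample_rate=48000 -t 1 silence.wav
-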
-
-
-
9 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
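-For example (a simple illustrative case), the expression "1+1;5*3" evaluates
-both sub-expressions in turn and yields 15, the value of the second one.
-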
-
-
The following binary operators are available: +, -, *, /, ^.
-
-
The following unary operators are available: +, -.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and less than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e, Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI).
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is lesser than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is lesser than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated to y ; it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Return the value of the expression printed.
-
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note: variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
* works like AND
-
-
+ works like OR
-
-
For example the construct:
-
if (A AND B) then C
-
is equivalent to:
-
if(A*B, C)
-
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
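-
-For example (a sketch; the file names and rates are placeholders), a video
-bitrate of 2000000 bits/s can be written using the ’M’ prefix:
-
ffmpeg -i input.mp4 -b:v 2M -maxrate 2.5M -bufsize 4M output.mp4
-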
-
-
-
-
10 OpenCL Options# TOC
-
-
When FFmpeg is configured with --enable-opencl , it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list() .
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list() .
-
-
-
-
-
-
11 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext
options or using the libavutil/opt.h API
-for programmatic use.
-
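-
-As a sketch of the command-line form (the options used are described later
-in this list; the file names are placeholders), generic codec options are
-passed as -option value to the ffmpeg tool:
-
ffmpeg -i input.avi -c:v mpeg4 -b:v 1M -g 250 -bf 2 output.mp4
-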
-
The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vectors per macroblock (mpeg4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
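-
-For example (a sketch only; the file names are placeholders), several of the
-flags listed above can be combined with the + prefix:
-
ffmpeg -i input.mxf -c:v mpeg2video -flags +ildct+ilme+cgop output.mpg
-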
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate
and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be between -1 and
-69; default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be between -1 and
-1024; default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not automatically detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in broken Microsoft decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, more strict version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what the consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non-standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk, do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non-compliances as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
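-
-For example (a sketch; err_detect is a decoder option and is given before
-the input file), strict checking that aborts decoding on errors can be
-requested with:
-
ffmpeg -err_detect +crccheck+explode -i input.ts -f null -
-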
-
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use otherwise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
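-
-For example (a sketch; debug is a flags option, so values can be combined
-with +), per-block QP and motion vector information can be printed while
-decoding to the null muxer:
-
ffmpeg -debug qp+mv -i input.mp4 -f null -
-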
-
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set limit motion vectors range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non spec compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identical to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames except keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
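-
-For example (a sketch; both options are decoder options and are therefore
-given before the input file), decoding can be restricted to keyframes for a
-fast scan of a file:
-
ffmpeg -skip_frame nokey -skip_loop_filter all -i input.mp4 -f null -
-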
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non drop frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none
.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
12 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders
and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER
/
---disable-decoder=DECODER
.
-
-
The option -decoders
of the ff* tools will display the list of
-enabled decoders.
-
-
-
-
13 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
13.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
13.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
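-
-For example (a sketch; the pixel format, size and frame rate are
-placeholders that must match the actual raw data), a raw YUV file can be
-decoded and re-encoded with:
-
ffmpeg -f rawvideo -pixel_format yuv420p -video_size 1280x720 -framerate 25 -i input.yuv output.mp4
-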
-
-
-
-
-
-
14 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
14.1 ac3# TOC
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
14.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
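-
-For example (a sketch; drc_scale is a decoder option and is therefore given
-before the input file), dynamic range compression can be disabled while
-decoding:
-
ffmpeg -drc_scale 0 -i input.ac3 output.wav
-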
-
-
-
-
-
14.2 ffwavesynth# TOC
-
-
-Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
14.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt
.
-
-
-
14.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm
.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
14.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc
.
-
-
-
14.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
14.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb
.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
14.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb
.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
14.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
15 Subtitles Decoders# TOC
-
-
-
15.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
15.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0
.
-
-
-
-
-
15.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi
.
-
-
-
15.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text, you should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext based subtitles if
-your application can handle simple text based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext based subtitles where empty spaces may
-be present at the start or at the end of the lines or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
-
-
-
16 Encoders# TOC
-
-
Encoders are configured elements in FFmpeg which allow the encoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native encoders
-are enabled by default. Encoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available encoders using the configure option --list-encoders
.
-
-
You can disable all the encoders with the configure option
---disable-encoders
and selectively enable / disable single encoders
-with the options --enable-encoder=ENCODER
/
---disable-encoder=ENCODER
.
-
-
The option -encoders
of the ff* tools will display the list of
-enabled encoders.
-
-
-
-
17 Audio Encoders# TOC
-
-
A description of some of the currently available audio encoders
-follows.
-
-
-
17.1 aac# TOC
-
-
Advanced Audio Coding (AAC) encoder.
-
-
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
-low complexity (AAC-LC) profile is supported. To use this encoder, you must set
-strict option to ‘experimental ’ or lower.
-
-
As this encoder is experimental, unexpected behavior may exist from time to
-time. For a more stable AAC encoder, see libvo-aacenc . However, be warned
-that some users report it has worse quality.
-
-
See also libfdk_aac and libfaac .
-
-
-
17.1.1 Options# TOC
-
-
-b
-Set bit rate in bits/s. Setting this automatically activates constant bit rate
-(CBR) mode.
-
-
-q
-Set quality for variable bit rate (VBR) mode. This option is valid only using
-the ffmpeg
command-line tool. For library interface users, use
-global_quality .
-
-
-stereo_mode
-Set stereo encoding mode. Possible values:
-
-
-‘auto ’
-Automatically selected by the encoder.
-
-
-‘ms_off ’
-Disable middle/side encoding. This is the default.
-
-
-‘ms_force ’
-Force middle/side encoding.
-
-
-
-
-aac_coder
-Set AAC encoder coding method. Possible values:
-
-
-‘faac ’
-FAAC-inspired method.
-
-This method is a simplified reimplementation of the method used in FAAC, which
-sets thresholds proportional to the band energies, and then decreases all the
-thresholds with quantizer steps to find the appropriate quantization with
-distortion below threshold band by band.
-
-The quality of this method is comparable to the two loop searching method
-described below, but a little better and slower.
-
-
-‘anmr ’
-Average noise to mask ratio (ANMR) trellis-based solution.
-
-In theory this gives the best quality of all the coding methods, but at the
-cost of the slowest speed.
-
-
-‘twoloop ’
-Two loop searching (TLS) method.
-
-This method first sets quantizers depending on band thresholds and then tries
-to find an optimal combination by adding or subtracting a specific value from
-all quantizers and adjusting some individual quantizer a little.
-
-This method produces quality similar to the FAAC method and is the default.
-
-
-‘fast ’
-Constant quantizer method.
-
-This method sets a constant quantizer for all bands. This is the fastest of all
-the methods, yet produces the worst quality.
-
-
-
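-
-For example (a sketch; as noted above, strict must be set to
-‘experimental ’ or lower to use this encoder):
-
ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k output.m4a
-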
-
-
-
-
-
-
17.2 ac3 and ac3_fixed# TOC
-
-
AC-3 audio encoders.
-
-
These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
The ac3 encoder uses floating-point math, while the ac3_fixed
-encoder only uses fixed-point integer math. This does not mean that one is
-always faster, just that one or the other may be better suited to a
-particular system. The floating-point encoder will generally produce better
-quality audio for a given bitrate. The ac3_fixed encoder is not the
-default codec for any of the output formats, so it must be specified explicitly
-using the option -acodec ac3_fixed
in order to use it.
-
-
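-
-For example (a sketch using the option mentioned above; file names are
-placeholders), the fixed-point encoder can be selected explicitly with:
-
ffmpeg -i input.wav -acodec ac3_fixed -b:a 192k output.ac3
-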
-
17.2.1 AC-3 Metadata# TOC
-
-
The AC-3 metadata options are used to set parameters that describe the audio,
-but in most cases do not affect the audio encoding itself. Some of the options
-do directly affect or influence the decoding and playback of the resulting
-bitstream, while others are just for informational purposes. A few of the
-options will add bits to the output stream that could otherwise be used for
-audio data, and will thus affect the quality of the output. Those will be
-indicated accordingly with a note in the option list below.
-
-
These parameters are described in detail in several publicly-available
-documents.
-
-
-
-
17.2.1.1 Metadata Control Options# TOC
-
-
--per_frame_metadata boolean
-Allow Per-Frame Metadata. Specifies if the encoder should check for changing
-metadata for each frame.
-
-0
-The metadata values set at initialization will be used for every frame in the
-stream. (default)
-
-1
-Metadata values can be changed before encoding each frame.
-
-
-
-
-
-
-
-
17.2.1.2 Downmix Levels# TOC
-
-
--center_mixlev level
-Center Mix Level. The amount of gain the decoder should apply to the center
-channel when downmixing to stereo. This field will only be written to the
-bitstream if a center channel is present. The value is specified as a scale
-factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6dB gain
-
-
-
-
--surround_mixlev level
-Surround Mix Level. The amount of gain the decoder should apply to the surround
-channel(s) when downmixing to stereo. This field will only be written to the
-bitstream if one or more surround channels are present. The value is specified
-as a scale factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.500
-Apply -6dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
17.2.1.3 Audio Production Information# TOC
-
Audio Production Information is optional information describing the mixing
-environment. Either none or both of the fields are written to the bitstream.
-
-
--mixing_level number
-Mixing Level. Specifies peak sound pressure level (SPL) in the production
-environment when the mix was mastered. Valid values are 80 to 111, or -1 for
-unknown or not indicated. The default value is -1, but that value cannot be
-used if the Audio Production Information is written to the bitstream. Therefore,
-if the room_type
option is not the default value, the mixing_level
-option must not be -1.
-
-
--room_type type
-Room Type. Describes the equalization used during the final mixing session at
-the studio or on the dubbing stage. A large room is a dubbing stage with the
-industry standard X-curve equalization; a small room has flat equalization.
-This field will not be written to the bitstream if both the mixing_level
-option and the room_type
option have the default values.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-large
-Large Room
-
-2
-small
-Small Room
-
-
-
-
-
-
-
-
17.2.1.4 Other Metadata Options# TOC
-
-
--copyright boolean
-Copyright Indicator. Specifies whether a copyright exists for this audio.
-
-0
-off
-No Copyright Exists (default)
-
-1
-on
-Copyright Exists
-
-
-
-
--dialnorm value
-Dialogue Normalization. Indicates how far the average dialogue level of the
-program is below digital 100% full scale (0 dBFS). This parameter determines a
-level shift during audio reproduction that sets the average volume of the
-dialogue to a preset level. The goal is to match volume level between program
-sources. A value of -31dB will result in no volume level change, relative to
-the source volume, during audio reproduction. Valid values are whole numbers in
-the range -31 to -1, with -31 being the default.
-
-
--dsur_mode mode
-Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround
-(Pro Logic). This field will only be written to the bitstream if the audio
-stream is stereo. Using this option does NOT mean the encoder will actually
-apply Dolby Surround processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Not Dolby Surround Encoded
-
-2
-on
-Dolby Surround Encoded
-
-
-
-
--original boolean
-Original Bit Stream Indicator. Specifies whether this audio is from the
-original source and not a copy.
-
-0
-off
-Not Original Source
-
-1
-on
-Original Source (default)
-
-
-
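-
-For example (a sketch; the metadata values are placeholders chosen only to
-illustrate the syntax of the options above):
-
ffmpeg -i input.wav -c:a ac3 -dialnorm -27 -dsur_mode on output.ac3
-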
-
-
-
-
-
17.2.2 Extended Bitstream Information# TOC
-
The extended bitstream options are part of the Alternate Bit Stream Syntax as
-specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts.
-If any one parameter in a group is specified, all values in that group will be
-written to the bitstream. Default values are used for those that are written
-but have not been specified. If the mixing levels are written, the decoder
-will use these values instead of the ones specified in the center_mixlev
-and surround_mixlev
options if it supports the Alternate Bit Stream
-Syntax.
-
-
-
17.2.2.1 Extended Bitstream Information - Part 1# TOC
-
-
--dmix_mode mode
-Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt
-(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-ltrt
-Lt/Rt Downmix Preferred
-
-2
-loro
-Lo/Ro Downmix Preferred
-
-
-
-
--ltrt_cmixlev level
-Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lt/Rt mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--ltrt_surmixlev level
-Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lt/Rt mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
--loro_cmixlev level
-Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lo/Ro mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--loro_surmixlev level
-Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lo/Ro mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
17.2.2.2 Extended Bitstream Information - Part 2# TOC
-
-
--dsurex_mode mode
-Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX
-(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually
-apply Dolby Surround EX processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-on
-Dolby Surround EX Off
-
-2
-off
-Dolby Surround EX On
-
-
-
-
--dheadphone_mode mode
-Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone
-encoding (multi-channel matrixed to 2.0 for use with headphones). Using this
-option does NOT mean the encoder will actually apply Dolby Headphone
-processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-on
-Dolby Headphone Off
-
-2
-off
-Dolby Headphone On
-
-
-
-
--ad_conv_type type
-A/D Converter Type. Indicates whether the audio has passed through HDCD A/D
-conversion.
-
-0
-standard
-Standard A/D Converter (default)
-
-1
-hdcd
-HDCD A/D Converter
-
-
-
-
-
-
-
-
17.2.3 Other AC-3 Encoding Options# TOC
-
-
--stereo_rematrixing boolean
-Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
-is an optional AC-3 feature that increases quality by selectively encoding
-the left/right channels as mid/side. This option is enabled by default, and it
-is highly recommended that it be left as enabled except for testing purposes.
-
-
-
-
-
-
17.2.4 Floating-Point-Only AC-3 Encoding Options# TOC
-
-
These options are only valid for the floating-point encoder and do not exist
-for the fixed-point encoder due to the corresponding features not being
-implemented in fixed-point.
-
-
--channel_coupling boolean
-Enables/Disables use of channel coupling, which is an optional AC-3 feature
-that increases quality by combining high frequency information from multiple
-channels into a single channel. The per-channel high frequency information is
-sent with less accuracy in both the frequency and time domains. This allows
-more bits to be used for lower frequencies while preserving enough information
-to reconstruct the high frequencies. This option is enabled by default for the
-floating-point encoder and should generally be left as enabled except for
-testing purposes or to increase encoding speed.
-
--1
-auto
-Selected by Encoder (default)
-
-0
-off
-Disable Channel Coupling
-
-1
-on
-Enable Channel Coupling
-
-
-
-
--cpl_start_band number
-Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a
-value higher than the bandwidth is used, it will be reduced to 1 less than the
-coupling end band. If auto is used, the start band will be determined by
-the encoder based on the bit rate, sample rate, and channel layout. This option
-has no effect if channel coupling is disabled.
-
--1
-auto
-Selected by Encoder (default)
-
-
-
-
-
-
-
-
17.3 libfaac# TOC
-
-
libfaac AAC (Advanced Audio Coding) encoder wrapper.
-
-
Requires the presence of the libfaac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfaac --enable-nonfree
.
-
-
-This encoder is considered to be of higher quality with respect to the
-native experimental FFmpeg AAC encoder .
-
-
For more information see the libfaac project at
-http://www.audiocoding.com/faac.html/ .
-
-
-
17.3.1 Options# TOC
-
-
The following shared FFmpeg codec options are recognized.
-
-
The following options are supported by the libfaac wrapper. The
-faac
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
-is not explicitly specified, it is automatically set to a suitable
-value depending on the selected profile. faac
bitrate is
-expressed in kilobits/s.
-
-Note that libfaac does not support CBR (Constant Bit Rate) but only
-ABR (Average Bit Rate).
-
-If VBR mode is enabled this option is ignored.
-
-
-ar (-R )
-Set audio sampling rate (in Hz).
-
-
-ac (-c )
-Set the number of audio channels.
-
-
-cutoff (-C )
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_main ’
-Main AAC (Main)
-
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_ssr ’
-Scalable Sample Rate (SSR)
-
-
-‘aac_ltp ’
-Long Term Prediction (LTP)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-flags +qscale
-Set constant quality VBR (Variable Bit Rate) mode.
-
-
-global_quality
-Set quality in VBR mode as an integer number of lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-and used to set the quality value used by libfaac. A reasonable range
-for the option value in QP units is [10-500], the higher the value the
-higher the quality.
-
-
-q (-q )
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value sets the quality value used by libfaac. A reasonable range
-for the option value is [10-500], the higher the value the higher the
-quality.
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
17.3.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
-
-
- Use ffmpeg
to convert an audio file to VBR AAC, using the
-LTP AAC profile:
-
-
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
-
-
-
-
-
17.4 libfdk_aac# TOC
-
-
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
-
-
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
-the Android project.
-
-
Requires the presence of the libfdk-aac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfdk-aac . The library is also incompatible with GPL,
-so if you allow the use of GPL, you should configure with
---enable-gpl --enable-nonfree --enable-libfdk-aac .
-
-
This encoder is considered to be of higher quality with respect to
-both the native experimental FFmpeg AAC encoder and
-libfaac .
-
-
VBR encoding, enabled through the vbr or flags
-+qscale options, is experimental and only works with some
-combinations of parameters.
-
-
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
-higher.
-
-
For more information see the fdk-aac project at
-http://sourceforge.net/p/opencore-amr/fdk-aac/ .
-
-
-
17.4.1 Options# TOC
-
-
The following options are mapped on the shared FFmpeg codec options.
-
-
-b
-Set bit rate in bits/s. If the bitrate is not explicitly specified, it
-is automatically set to a suitable value depending on the selected
-profile.
-
-In case VBR mode is enabled the option is ignored.
-
-
-ar
-Set audio sampling rate (in Hz).
-
-
-channels
-Set the number of audio channels.
-
-
-flags +qscale
-Enable fixed quality, VBR (Variable Bit Rate) mode.
-Note that VBR is implicitly enabled when the vbr value is
-positive.
-
-
-cutoff
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_he ’
-High Efficiency AAC (HE-AAC)
-
-
-‘aac_he_v2 ’
-High Efficiency AAC version 2 (HE-AACv2)
-
-
-‘aac_ld ’
-Low Delay AAC (LD)
-
-
-‘aac_eld ’
-Enhanced Low Delay AAC (ELD)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-
-
The following are private options of the libfdk_aac encoder.
-
-
-afterburner
-Enable afterburner feature if set to 1, disabled if set to 0. This
-improves the quality but also the required processing power.
-
-Default value is 1.
-
-
-eld_sbr
-Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
-if set to 0.
-
-Default value is 0.
-
-
-signaling
-Set SBR/PS signaling style.
-
-It can assume one of the following values:
-
-‘default ’
-choose signaling implicitly (explicit hierarchical by default,
-implicit if global header is disabled)
-
-
-‘implicit ’
-implicit backwards compatible signaling
-
-
-‘explicit_sbr ’
-explicit SBR, implicit PS signaling
-
-
-‘explicit_hierarchical ’
-explicit hierarchical signaling
-
-
-
-Default value is ‘default ’.
-
-
-latm
-Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
-
-Default value is 0.
-
-
-header_period
-Set StreamMuxConfig and PCE repetition period (in frames) for sending
-in-band configuration buffers within LATM/LOAS transport layer.
-
-Must be a 16-bits non-negative integer.
-
-Default value is 0.
-
-
-vbr
-Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
-good) and 5 is highest quality. A value of 0 disables VBR and enables CBR
-(Constant Bit Rate).
-
-Currently only the ‘aac_low ’ profile supports VBR encoding.
-
-VBR modes 1-5 correspond to roughly the following average bit rates:
-
-
-‘1 ’
-32 kbps/channel
-
-‘2 ’
-40 kbps/channel
-
-‘3 ’
-48-56 kbps/channel
-
-‘4 ’
-64 kbps/channel
-
-‘5 ’
-about 80-96 kbps/channel
-
-
-
-Default value is 0.
-
-
-
-
-
17.4.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to VBR AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
-
-
- Use ffmpeg
 to convert an audio file to CBR 64 kbps AAC, using the
-High-Efficiency AAC profile:
-
-
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
-
-
-
-
-
17.5 libmp3lame# TOC
-
-
LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper.
-
-
Requires the presence of the libmp3lame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libmp3lame
.
-
-
See libshine for a fixed-point MP3 encoder, although with a
-lower quality.
-
-
-
17.5.1 Options# TOC
-
-
The following options are supported by the libmp3lame wrapper. The
-lame
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate is
-expressed in kilobits/s.
-
-
-q (-V )
-Set constant quality setting for VBR. This option is valid only
-using the ffmpeg
command-line tool. For library interface
-users, use global_quality .
-
-
-compression_level (-q )
-Set algorithm quality. Valid arguments are integers in the 0-9 range,
-with 0 meaning highest quality but slowest, and 9 meaning fastest
-while producing the worst quality.
-
-
-reservoir
-Enable use of bit reservoir when set to 1. Default value is 1. LAME
-has this enabled by default, but it can be overridden by using the
---nores option.
-
-
-joint_stereo (-m j )
-Enable the encoder to use (on a frame by frame basis) either L/R
-stereo or mid/side stereo. Default value is 1.
-
-
-abr (--abr )
-Enable the encoder to use ABR when set to 1. The lame
---abr option sets the target bitrate, while this option only
-tells FFmpeg to use ABR; it still relies on b to set the bitrate.
-
-
-
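-
-For example (a sketch of the options above; the quality value is only an
-illustration), VBR MP3 encoding at a high quality setting:
-
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
-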
-
-
-
17.6 libopencore-amrnb# TOC
-
-
OpenCORE Adaptive Multi-Rate Narrowband encoder.
-
-
Requires the presence of the libopencore-amrnb headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopencore-amrnb --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
-but you can override it by setting strict to ‘unofficial ’ or
-lower.
-
-
-
17.6.1 Options# TOC
-
-
-b
-Set bitrate in bits per second. Only the following bitrates are supported,
-otherwise libavcodec will round to the nearest valid bitrate.
-
-
-4750
-5150
-5900
-6700
-7400
-7950
-10200
-12200
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
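-
-For example (a sketch; note the mono/8000 Hz requirements described above
-and that only the listed bitrates are accepted):
-
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 12200 output.amr
-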
-
-
-
17.7 libshine# TOC
-
-
Shine Fixed-Point MP3 encoder wrapper.
-
-
Shine is a fixed-point MP3 encoder. It has a far better performance on
-platforms without an FPU, e.g. armel CPUs, and some phones and tablets.
-However, as it is more targeted on performance than quality, it is not on par
-with LAME and other production-grade encoders quality-wise. Also, according to
-the project’s homepage, this encoder may not be free of bugs as the code was
-written a long time ago and the project was dead for at least 5 years.
-
-
This encoder only supports stereo and mono input. This is also CBR-only.
-
-
The original project (last updated in early 2007) is at
-http://sourceforge.net/projects/libshine-fxp/ . We only support the
-updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine .
-
-
Requires the presence of the libshine headers and library during
-configuration. You need to explicitly configure the build with
---enable-libshine
.
-
-
See also libmp3lame .
-
-
-
17.7.1 Options# TOC
-
-
The following options are supported by the libshine wrapper. The
-shineenc
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. shineenc
-b option
-is expressed in kilobits/s.
-
-
-
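A hypothetical invocation (file names are placeholders) encoding CBR MP3 with libshine:

ffmpeg -i input.wav -c:a libshine -b:a 128k output.mp3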
-
-
-
17.8 libtwolame# TOC
-
-
TwoLAME MP2 encoder wrapper.
-
-
Requires the presence of the libtwolame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtwolame
.
-
-
-
17.8.1 Options# TOC
-
-
The following options are supported by the libtwolame wrapper. The
-twolame
-equivalent options follow the FFmpeg ones and are in
-parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. twolame
b
-option is expressed in kilobits/s. Default value is 128k.
-
-
-q (-V )
-Set quality for experimental VBR support. Maximum value range is
-from -50 to 50, useful range is from -10 to 10. The higher the
-value, the better the quality. This option is valid only using the
-ffmpeg
command-line tool. For library interface users,
-use global_quality .
-
-
-mode (--mode )
-Set the mode of the resulting audio. Possible values:
-
-
-‘auto ’
-Choose mode automatically based on the input. This is the default.
-
-‘stereo ’
-Stereo
-
-‘joint_stereo ’
-Joint stereo
-
-‘dual_channel ’
-Dual channel
-
-‘mono ’
-Mono
-
-
-
-
-psymodel (--psyc-mode )
-Set psychoacoustic model to use in encoding. The argument must be
-an integer between -1 and 4, inclusive. The higher the value, the
-better the quality. The default value is 3.
-
-
-energy_levels (--energy )
-Enable energy levels extensions when set to 1. The default value is
-0 (disabled).
-
-
-error_protection (--protect )
-Enable CRC error protection when set to 1. The default value is 0
-(disabled).
-
-
-copyright (--copyright )
-Set MPEG audio copyright flag when set to 1. The default value is 0
-(disabled).
-
-
-original (--original )
-Set MPEG audio original flag when set to 1. The default value is 0
-(disabled).
-
-
-
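For example (an illustrative command, not from the original text), an MP2 encode with joint stereo might look like:

ffmpeg -i input.wav -c:a libtwolame -b:a 192k -mode joint_stereo output.mp2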
-
-
-
17.9 libvo-aacenc# TOC
-
-
VisualOn AAC encoder.
-
-
Requires the presence of the libvo-aacenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-aacenc --enable-version3
.
-
-
This encoder is considered to be worse than the
-native experimental FFmpeg AAC encoder , according to
-multiple sources.
-
-
-
17.9.1 Options# TOC
-
-
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
-channels. It is also CBR-only.
-
-
-b
-Set bit rate in bits/s.
-
-
-
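A minimal illustrative command (file names are placeholders) for a CBR AAC-LC encode with this wrapper:

ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a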
-
-
-
17.10 libvo-amrwbenc# TOC
-
-
VisualOn Adaptive Multi-Rate Wideband encoder.
-
-
Requires the presence of the libvo-amrwbenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-amrwbenc --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 16000Hz sample
-rate, but you can override it by setting strict to
-‘unofficial ’ or lower.
-
-
-
17.10.1 Options# TOC
-
-
-b
-Set bitrate in bits/s. Only the following bitrates are supported, otherwise
-libavcodec will round to the nearest valid bitrate.
-
-
-‘6600 ’
-‘8850 ’
-‘12650 ’
-‘14250 ’
-‘15850 ’
-‘18250 ’
-‘19850 ’
-‘23050 ’
-‘23850 ’
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
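An illustrative sketch (not from the original documentation) of a mono 16000 Hz AMR-WB encode at one of the supported bitrates:

ffmpeg -i input.wav -ar 16000 -ac 1 -c:a libvo_amrwbenc -b:a 23850 output.awb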
-
-
-
17.11 libopus# TOC
-
-
libopus Opus Interactive Audio Codec encoder wrapper.
-
-
Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
-
17.11.1 Option Mapping# TOC
-
-
Most libopus options are modelled after the opusenc
utility from
-opus-tools. The following is an option mapping chart describing options
-supported by the libopus wrapper, and their opusenc
-equivalent
-in parentheses.
-
-
-b (bitrate )
-Set the bit rate in bits/s. FFmpeg’s b option is
-expressed in bits/s, while opusenc
’s bitrate in
-kilobits/s.
-
-
-vbr (vbr , hard-cbr , and cvbr )
-Set VBR mode. The FFmpeg vbr option has the following
-valid arguments, with their opusenc
equivalent options
-in parentheses:
-
-
-‘off (hard-cbr ) ’
-Use constant bit rate encoding.
-
-
-‘on (vbr ) ’
-Use variable bit rate encoding (the default).
-
-
-‘constrained (cvbr ) ’
-Use constrained variable bit rate encoding.
-
-
-
-
-compression_level (comp )
-Set encoding algorithm complexity. Valid options are integers in
-the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
-gives the highest quality but slowest encoding. The default is 10.
-
-
-frame_duration (framesize )
-Set maximum frame size, or duration of a frame in milliseconds. The
-argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller
-frame sizes achieve lower latency but less quality at a given bitrate.
-Sizes greater than 20ms are only interesting at fairly low bitrates.
-The default is 20ms.
-
-
-packet_loss (expect-loss )
-Set expected packet loss percentage. The default is 0.
-
-
-application (N.A.)
-Set intended application type. Valid options are listed below:
-
-
-‘voip ’
-Favor improved speech intelligibility.
-
-‘audio ’
-Favor faithfulness to the input (the default).
-
-‘lowdelay ’
-Restrict to only the lowest delay modes.
-
-
-
-
-cutoff (N.A.)
-Set cutoff bandwidth in Hz. The argument must be exactly one of the
-following: 4000, 6000, 8000, 12000, or 20000, corresponding to
-narrowband, mediumband, wideband, super wideband, and fullband
-respectively. The default is 0 (cutoff disabled).
-
-
-
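As an illustration of the mapping above (an assumed command, not from the original text; file names are placeholders), a typical VBR Opus encode could be:

ffmpeg -i input.wav -c:a libopus -b:a 96k -vbr on -application audio output.opus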
-
-
-
17.12 libvorbis# TOC
-
-
libvorbis encoder wrapper.
-
-
Requires the presence of the libvorbisenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvorbis
.
-
-
-
17.12.1 Options# TOC
-
-
The following options are supported by the libvorbis wrapper. The
-oggenc
-equivalent of the options are listed in parentheses.
-
-
To get a more accurate and extensive documentation of the libvorbis
-options, consult the libvorbisenc’s and oggenc
’s documentations.
-See http://xiph.org/vorbis/ ,
-http://wiki.xiph.org/Vorbis-tools , and oggenc(1).
-
-
-b (-b )
-Set bitrate expressed in bits/s for ABR. oggenc
-b is
-expressed in kilobits/s.
-
-
-q (-q )
-Set constant quality setting for VBR. The value should be a float
-number in the range of -1.0 to 10.0. The higher the value, the better
-the quality. The default value is ‘3.0 ’.
-
-This option is valid only using the ffmpeg
command-line tool.
-For library interface users, use global_quality .
-
-
-cutoff (--advanced-encode-option lowpass_frequency=N )
-Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc
’s
-related option is expressed in kHz. The default value is ‘0 ’ (cutoff
-disabled).
-
-
-minrate (-m )
-Set minimum bitrate expressed in bits/s. oggenc
-m is
-expressed in kilobits/s.
-
-
-maxrate (-M )
-Set maximum bitrate expressed in bits/s. oggenc
-M is
-expressed in kilobits/s. This only has effect on ABR mode.
-
-
-iblock (--advanced-encode-option impulse_noisetune=N )
-Set noise floor bias for impulse blocks. The value is a float number from
--15.0 to 0.0. A negative bias instructs the encoder to pay special attention
-to the crispness of transients in the encoded audio. The tradeoff for better
-transient response is a higher bitrate.
-
-
-
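For example (an illustrative command, not from the original text), a quality-based Vorbis encode:

ffmpeg -i input.wav -c:a libvorbis -q:a 5 output.ogg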
-
-
-
17.13 libwavpack# TOC
-
-
A wrapper providing WavPack encoding through libwavpack.
-
-
Only lossless mode using 32-bit integer samples is supported currently.
-
-
Requires the presence of the libwavpack headers and library during
-configuration. You need to explicitly configure the build with
---enable-libwavpack
.
-
-
Note that a libavcodec-native encoder for the WavPack codec exists so users can
-encode audio with this codec without using this encoder. See wavpackenc .
-
-
-
17.13.1 Options# TOC
-
-
wavpack
command line utility’s corresponding options are listed in
-parentheses, if any.
-
-
-frame_size (--blocksize )
-Default is 32768.
-
-
-compression_level
-Set speed vs. compression tradeoff. Acceptable arguments are listed below:
-
-
-‘0 (-f ) ’
-Fast mode.
-
-
-‘1 ’
-Normal (default) settings.
-
-
-‘2 (-h ) ’
-High quality.
-
-
-‘3 (-hh ) ’
-Very high quality.
-
-
-‘4-8 (-hh -x EXTRAPROC ) ’
-Same as ‘3 ’, but with extra processing enabled.
-
-‘4 ’ is the same as -x2 and ‘8 ’ is the same as -x6 .
-
-
-
-
-
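A hypothetical invocation (placeholder file names) of a lossless WavPack encode via libwavpack at the high-quality setting:

ffmpeg -i input.wav -c:a libwavpack -compression_level 2 output.wv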
-
-
-
17.14 wavpack# TOC
-
-
WavPack lossless audio encoder.
-
-
This is a libavcodec-native WavPack encoder. There is also an encoder based on
-libwavpack, but there is virtually no reason to use that encoder.
-
-
See also libwavpack .
-
-
-
17.14.1 Options# TOC
-
-
The equivalent options for wavpack
command line utility are listed in
-parentheses.
-
-
-
17.14.1.1 Shared options# TOC
-
-
The following shared options are effective for this encoder. Only special notes
-about this particular encoder will be documented here. For the general meaning
-of the options, see the Codec Options chapter .
-
-
-frame_size (--blocksize )
-For this encoder, the range for this option is between 128 and 131072. Default
-is automatically decided based on sample rate and number of channels.
-
-For the complete formula used to calculate the default, see
-libavcodec/wavpackenc.c .
-
-
-compression_level (-f , -h , -hh , and -x )
-This option’s syntax is consistent with libwavpack ’s.
-
-
-
-
-
17.14.1.2 Private options# TOC
-
-
-joint_stereo (-j )
-Set whether to enable joint stereo. Valid values are:
-
-
-‘on (1 ) ’
-Force mid/side audio encoding.
-
-‘off (0 ) ’
-Force left/right audio encoding.
-
-‘auto ’
-Let the encoder decide automatically.
-
-
-
-
-optimize_mono
-Set whether to enable optimization for mono. This option is only effective for
-non-mono streams. Available values:
-
-
-‘on ’
-enabled
-
-‘off ’
-disabled
-
-
-
-
-
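An illustrative sketch (not from the original documentation) using the native encoder with forced mid/side coding:

ffmpeg -i input.wav -c:a wavpack -joint_stereo on output.wv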
-
-
-
-
18 Video Encoders# TOC
-
-
A description of some of the currently available video encoders
-follows.
-
-
-
18.1 libtheora# TOC
-
-
libtheora Theora encoder wrapper.
-
-
Requires the presence of the libtheora headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtheora
.
-
-
For more information about the libtheora project see
-http://www.theora.org/ .
-
-
-
18.1.1 Options# TOC
-
-
The following global options are mapped to internal libtheora options
-which affect the quality and the bitrate of the encoded stream.
-
-
-b
-Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
-case VBR (Variable Bit Rate) mode is enabled this option is ignored.
-
-
-flags
-Used to enable constant quality mode (VBR) encoding through the
-qscale flag, and to enable the pass1
and pass2
-modes.
-
-
-g
-Set the GOP size.
-
-
-global_quality
-Set the global quality as an integer in lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
-value in the native libtheora range [0-63]. A higher value corresponds
-to a higher quality.
-
-
-q
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value is clipped in the [0-10] range, and then multiplied by 6.3
-to get a value in the native libtheora range [0-63].
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
18.1.2 Examples# TOC
-
-
- Set maximum constant quality (VBR) encoding with ffmpeg
:
-
-
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
-
-
- Use ffmpeg
to convert a CBR 1000 kbps Theora video stream:
-
-
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
-
-
-
-
-
18.2 libvpx# TOC
-
-
VP8/VP9 format supported through libvpx.
-
-
Requires the presence of the libvpx headers and library during configuration.
-You need to explicitly configure the build with --enable-libvpx
.
-
-
-
18.2.1 Options# TOC
-
-
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
-
-
-threads
-g_threads
-
-
-profile
-g_profile
-
-
-vb
-rc_target_bitrate
-
-
-g
-kf_max_dist
-
-
-keyint_min
-kf_min_dist
-
-
-qmin
-rc_min_quantizer
-
-
-qmax
-rc_max_quantizer
-
-
-bufsize, vb
-rc_buf_sz
-(bufsize * 1000 / vb)
-
-rc_buf_optimal_sz
-(bufsize * 1000 / vb * 5 / 6)
-
-
-rc_init_occupancy, vb
-rc_buf_initial_sz
-(rc_init_occupancy * 1000 / vb)
-
-
-rc_buffer_aggressivity
-rc_undershoot_pct
-
-
-skip_threshold
-rc_dropframe_thresh
-
-
-qcomp
-rc_2pass_vbr_bias_pct
-
-
-maxrate, vb
-rc_2pass_vbr_maxsection_pct
-(maxrate * 100 / vb)
-
-
-minrate, vb
-rc_2pass_vbr_minsection_pct
-(minrate * 100 / vb)
-
-
-minrate, maxrate, vb
-VPX_CBR
-(minrate == maxrate == vb)
-
-
-crf
-VPX_CQ
, VP8E_SET_CQ_LEVEL
-
-
-quality
-
-best
-VPX_DL_BEST_QUALITY
-
-good
-VPX_DL_GOOD_QUALITY
-
-realtime
-VPX_DL_REALTIME
-
-
-
-
-speed
-VP8E_SET_CPUUSED
-
-
-nr
-VP8E_SET_NOISE_SENSITIVITY
-
-
-mb_threshold
-VP8E_SET_STATIC_THRESHOLD
-
-
-slices
-VP8E_SET_TOKEN_PARTITIONS
-
-
-max-intra-rate
-VP8E_SET_MAX_INTRA_BITRATE_PCT
-
-
-force_key_frames
-VPX_EFLAG_FORCE_KF
-
-
-Alternate reference frame related
-
-vp8flags altref
-VP8E_SET_ENABLEAUTOALTREF
-
-arnr_max_frames
-VP8E_SET_ARNR_MAXFRAMES
-
-arnr_type
-VP8E_SET_ARNR_TYPE
-
-arnr_strength
-VP8E_SET_ARNR_STRENGTH
-
-rc_lookahead
-g_lag_in_frames
-
-
-
-
-vp8flags error_resilient
-g_error_resilient
-
-
-aq_mode
-VP9E_SET_AQ_MODE
-
-
-
-
-
For more information about libvpx see:
-http://www.webmproject.org/
-
-
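As an illustration of these mappings (an assumed command, not from the original text), a constant-quality VP9 encode commonly combines crf with a target bitrate of 0:

ffmpeg -i input.mp4 -c:v libvpx-vp9 -crf 30 -b:v 0 output.webm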
-
-
18.3 libwebp# TOC
-
-
libwebp WebP Image encoder wrapper
-
-
libwebp is Google’s official encoder for WebP images. It can encode in either
-lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
-frame. Lossless images are a separate codec developed by Google.
-
-
-
18.3.1 Pixel Format# TOC
-
-
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
-to limitations of the format and libwebp. Alpha is supported for either mode.
-Because of API limitations, if RGB is passed in when encoding lossy or YUV is
-passed in for encoding lossless, the pixel format will automatically be
-converted using functions from libwebp. This is not ideal and is done only for
-convenience.
-
-
-
18.3.2 Options# TOC
-
-
--lossless boolean
-Enables/Disables use of lossless mode. Default is 0.
-
-
--compression_level integer
-For lossy, this is a quality/speed tradeoff. Higher values give better quality
-for a given size at the cost of increased encoding time. For lossless, this is
-a size/speed tradeoff. Higher values give smaller size at the cost of increased
-encoding time. More specifically, it controls the number of extra algorithms
-and compression tools used, and varies the combination of these tools. This
-maps to the method option in libwebp. The valid range is 0 to 6.
-Default is 4.
-
-
--qscale float
-For lossy encoding, this controls image quality, 0 to 100. For lossless
-encoding, this controls the effort and time spent at compressing more. The
-default value is 75. Note that for usage via libavcodec, this option is called
-global_quality and must be multiplied by FF_QP2LAMBDA .
-
-
--preset type
-Configuration preset. This does some automatic settings based on the general
-type of the image.
-
-none
-Do not use a preset.
-
-default
-Use the encoder default.
-
-picture
-Digital picture, like portrait, inner shot
-
-photo
-Outdoor photograph, with natural lighting
-
-drawing
-Hand or line drawing, with high-contrast details
-
-icon
-Small-sized colorful images
-
-text
-Text-like
-
-
-
-
-
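For example (illustrative commands with placeholder file names), a lossless and a lossy WebP encode might look like:

ffmpeg -i input.png -c:v libwebp -lossless 1 output.webp
ffmpeg -i input.png -c:v libwebp -lossless 0 -q:v 75 output_lossy.webp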
-
-
-
18.4 libx264, libx264rgb# TOC
-
-
x264 H.264/MPEG-4 AVC encoder wrapper.
-
-
This encoder requires the presence of the libx264 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx264
.
-
-
libx264 supports an impressive number of features, including 8x8 and
-4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
-entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
-for detail retention (adaptive quantization, psy-RD, psy-trellis).
-
-
Many libx264 encoder options are mapped to FFmpeg global codec
-options, while unique encoder options are provided through private
-options. Additionally the x264opts and x264-params
-private options allow one to pass a list of key=value tuples as accepted
-by the libx264 x264_param_parse
function.
-
-
The x264 project website is at
-http://www.videolan.org/developers/x264.html .
-
-
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
-pixel formats as input instead of YUV.
-
-
-
18.4.1 Supported Pixel Formats# TOC
-
-
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
-x264’s configure time. FFmpeg only supports one bit depth in one particular
-build. In other words, it is not possible to build one FFmpeg with multiple
-versions of x264 with different bit depths.
-
-
-
18.4.2 Options# TOC
-
-
The following options are supported by the libx264 wrapper. The
-x264
-equivalent options or values are listed in parentheses
-for easy migration.
-
-
To reduce the duplication of documentation, only the private options
-and some others requiring special attention are documented here. For
-the documentation of the undocumented generic options, see
-the Codec Options chapter .
-
-
To get a more accurate and extensive documentation of the libx264
-options, invoke the command x264 --full-help
or consult
-the libx264 documentation.
-
-
-b (bitrate )
-Set bitrate in bits/s. Note that FFmpeg’s b option is
-expressed in bits/s, while x264
’s bitrate is in
-kilobits/s.
-
-
-bf (bframes )
-g (keyint )
-qmin (qpmin )
-Minimum quantizer scale.
-
-
-qmax (qpmax )
-Maximum quantizer scale.
-
-
-qdiff (qpstep )
-Maximum difference between quantizer scales.
-
-
-qblur (qblur )
-Quantizer curve blur
-
-
-qcomp (qcomp )
-Quantizer curve compression factor
-
-
-refs (ref )
-Number of reference frames each P-frame can use. The range is from 0 to 16.
-
-
-sc_threshold (scenecut )
-Sets the threshold for the scene change detection.
-
-
-trellis (trellis )
-Performs Trellis quantization to increase efficiency. Enabled by default.
-
-
-nr (nr )
-me_range (merange )
-Maximum range of the motion search in pixels.
-
-
-me_method (me )
-Set motion estimation method. Possible values in the decreasing order
-of speed:
-
-
-‘dia (dia ) ’
-‘epzs (dia ) ’
-Diamond search with radius 1 (fastest). ‘epzs ’ is an alias for
-‘dia ’.
-
-‘hex (hex ) ’
-Hexagonal search with radius 2.
-
-‘umh (umh ) ’
-Uneven multi-hexagon search.
-
-‘esa (esa ) ’
-Exhaustive search.
-
-‘tesa (tesa ) ’
-Hadamard exhaustive search (slowest).
-
-
-
-
-subq (subme )
-Sub-pixel motion estimation method.
-
-
-b_strategy (b-adapt )
-Adaptive B-frame placement decision algorithm. Use only on first-pass.
-
-
-keyint_min (min-keyint )
-Minimum GOP size.
-
-
-coder
-Set entropy encoder. Possible values:
-
-
-‘ac ’
-Enable CABAC.
-
-
-‘vlc ’
-Enable CAVLC and disable CABAC. It generates the same effect as
-x264
’s --no-cabac option.
-
-
-
-
-cmp
-Set full pixel motion estimation comparison algorithm. Possible values:
-
-
-‘chroma ’
-Enable chroma in motion estimation.
-
-
-‘sad ’
-Ignore chroma in motion estimation. It generates the same effect as
-x264
’s --no-chroma-me option.
-
-
-
-
-threads (threads )
-Number of encoding threads.
-
-
-thread_type
-Set multithreading technique. Possible values:
-
-
-‘slice ’
-Slice-based multithreading. It generates the same effect as
-x264
’s --sliced-threads option.
-
-‘frame ’
-Frame-based multithreading.
-
-
-
-
-flags
-Set encoding flags. It can be used to disable closed GOP and enable
-open GOP by setting it to -cgop
. The result is similar to
-the behavior of x264
’s --open-gop option.
-
-
-rc_init_occupancy (vbv-init )
-preset (preset )
-Set the encoding preset.
-
-
-tune (tune )
-Set tuning of the encoding params.
-
-
-profile (profile )
-Set profile restrictions.
-
-
-fastfirstpass
-Enable fast settings when encoding first pass, when set to 1. When set
-to 0, it has the same effect as x264
’s
---slow-firstpass option.
-
-
-crf (crf )
-Set the quality for constant quality mode.
-
-
-crf_max (crf-max )
-In CRF mode, prevents VBV from lowering quality beyond this point.
-
-
-qp (qp )
-Set constant quantization rate control method parameter.
-
-
-aq-mode (aq-mode )
-Set AQ method. Possible values:
-
-
-‘none (0 ) ’
-Disabled.
-
-
-‘variance (1 ) ’
-Variance AQ (complexity mask).
-
-
-‘autovariance (2 ) ’
-Auto-variance AQ (experimental).
-
-
-
-
-aq-strength (aq-strength )
-Set AQ strength, reduce blocking and blurring in flat and textured areas.
-
-
-psy
-Use psychovisual optimizations when set to 1. When set to 0, it has the
-same effect as x264
’s --no-psy option.
-
-
-psy-rd (psy-rd )
-Set strength of psychovisual optimization, in
-psy-rd :psy-trellis format.
-
-
-rc-lookahead (rc-lookahead )
-Set number of frames to look ahead for frametype and ratecontrol.
-
-
-weightb
-Enable weighted prediction for B-frames when set to 1. When set to 0,
-it has the same effect as x264
’s --no-weightb option.
-
-
-weightp (weightp )
-Set weighted prediction method for P-frames. Possible values:
-
-
-‘none (0 ) ’
-Disabled
-
-‘simple (1 ) ’
-Enable only weighted refs
-
-‘smart (2 ) ’
-Enable both weighted refs and duplicates
-
-
-
-
-ssim (ssim )
-Enable calculation and printing SSIM stats after the encoding.
-
-
-intra-refresh (intra-refresh )
-Enable the use of Periodic Intra Refresh instead of IDR frames when set
-to 1.
-
-
-avcintra-class (class )
-Configure the encoder to generate AVC-Intra.
-Valid values are 50, 100 and 200.
-
-
-bluray-compat (bluray-compat )
-Configure the encoder to be compatible with the Blu-ray standard.
-It is a shorthand for setting "bluray-compat=1 force-cfr=1".
-
-
-b-bias (b-bias )
-Set the influence on how often B-frames are used.
-
-
-b-pyramid (b-pyramid )
-Set method for keeping of some B-frames as references. Possible values:
-
-
-‘none (none ) ’
-Disabled.
-
-‘strict (strict ) ’
-Strictly hierarchical pyramid.
-
-‘normal (normal ) ’
-Non-strict (not Blu-ray compatible).
-
-
-
-
-mixed-refs
-Enable the use of one reference per partition, as opposed to one
-reference per macroblock when set to 1. When set to 0, it has the
-same effect as x264
’s --no-mixed-refs option.
-
-
-8x8dct
-Enable adaptive spatial transform (high profile 8x8 transform)
-when set to 1. When set to 0, it has the same effect as
-x264
’s --no-8x8dct option.
-
-
-fast-pskip
-Enable early SKIP detection on P-frames when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-fast-pskip option.
-
-
-aud (aud )
-Enable use of access unit delimiters when set to 1.
-
-
-mbtree
-Enable use of macroblock tree rate control when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-mbtree option.
-
-
-deblock (deblock )
-Set loop filter parameters, in alpha :beta form.
-
-
-cplxblur (cplxblur )
-Set fluctuations reduction in QP (before curve compression).
-
-
-partitions (partitions )
-Set partitions to consider, as a comma-separated list. Possible
-values in the list:
-
-
-‘p8x8 ’
-8x8 P-frame partition.
-
-‘p4x4 ’
-4x4 P-frame partition.
-
-‘b8x8 ’
-4x4 B-frame partition.
-
-‘i8x8 ’
-8x8 I-frame partition.
-
-‘i4x4 ’
-4x4 I-frame partition.
-(Enabling ‘p4x4 ’ requires ‘p8x8 ’ to be enabled. Enabling
-‘i8x8 ’ requires adaptive spatial transform (8x8dct
-option) to be enabled.)
-
-‘none (none ) ’
-Do not consider any partitions.
-
-‘all (all ) ’
-Consider every partition.
-
-
-
-
-direct-pred (direct )
-Set direct MV prediction mode. Possible values:
-
-
-‘none (none ) ’
-Disable MV prediction.
-
-‘spatial (spatial ) ’
-Enable spatial predicting.
-
-‘temporal (temporal ) ’
-Enable temporal predicting.
-
-‘auto (auto ) ’
-Automatically decided.
-
-
-
-
-slice-max-size (slice-max-size )
-Set the limit of the size of each slice in bytes. If not specified
-but RTP payload size (ps ) is specified, that is used.
-
-
-stats (stats )
-Set the file name for multi-pass stats.
-
-
-nal-hrd (nal-hrd )
-Set signal HRD information (requires vbv-bufsize to be set).
-Possible values:
-
-
-‘none (none ) ’
-Disable HRD information signaling.
-
-‘vbr (vbr ) ’
-Variable bit rate.
-
-‘cbr (cbr ) ’
-Constant bit rate (not allowed in MP4 container).
-
-
-
-
-x264opts (N.A.)
-Set any x264 option, see x264 --fullhelp
for a list.
-
-Argument is a list of key =value couples separated by
-":". In filter and psy-rd options that use ":" as a separator
-themselves, use "," instead. They accept it as well since long ago but this
-is kept undocumented for some reason.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
-
-
-
-x264-params (N.A.)
-Override the x264 configuration using a :-separated list of key=value
-parameters.
-
-This option is functionally the same as the x264opts , but is
-duplicated for compatibility with the Libav fork.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
-cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
-no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
-
-
-
-
-
Encoding ffpresets for common usages are provided so they can be used with the
-general presets system (e.g. passing the pre option).
-
-
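As a quick illustration (not part of the original text; file names are placeholders), a common constant-quality H.264 encode combines a preset with crf:

ffmpeg -i input.mov -c:v libx264 -preset slow -crf 22 -c:a copy output.mkv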
-
18.5 libx265# TOC
-
-
x265 H.265/HEVC encoder wrapper.
-
-
This encoder requires the presence of the libx265 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx265 .
-
-
-
18.5.1 Options# TOC
-
-
-preset
-Set the x265 preset.
-
-
-tune
-Set the x265 tune parameter.
-
-
-x265-params
-Set x265 options using a list of key =value couples separated
-by ":". See x265 --help
for a list of options.
-
-For example to specify libx265 encoding options with -x265-params :
-
-
-
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
-
-
-
-
-
-
18.6 libxvid# TOC
-
-
Xvid MPEG-4 Part 2 encoder wrapper.
-
-
This encoder requires the presence of the libxvidcore headers and library
-during configuration. You need to explicitly configure the build with
---enable-libxvid --enable-gpl
.
-
-
The native mpeg4
encoder supports the MPEG-4 Part 2 format, so
-users can encode to this format without this library.
-
-
-
18.6.1 Options# TOC
-
-
The following options are supported by the libxvid wrapper. Some of
-the following options are listed but are not documented, and
-correspond to shared codec options. See the Codec
-Options chapter for their documentation. The other shared options
-which are not listed have no effect for the libxvid encoder.
-
-
-b
-g
-qmin
-qmax
-mpeg_quant
-threads
-bf
-b_qfactor
-b_qoffset
-flags
-Set specific encoding flags. Possible values:
-
-
-‘mv4 ’
-Use four motion vector by macroblock.
-
-
-‘aic ’
-Enable high quality AC prediction.
-
-
-‘gray ’
-Only encode grayscale.
-
-
-‘gmc ’
-Enable the use of global motion compensation (GMC).
-
-
-‘qpel ’
-Enable quarter-pixel motion compensation.
-
-
-‘cgop ’
-Enable closed GOP.
-
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-
-
-
-
-trellis
-me_method
-Set motion estimation method. Possible values in decreasing order of
-speed and increasing order of quality:
-
-
-‘zero ’
-Use no motion estimation (default).
-
-
-‘phods ’
-‘x1 ’
-‘log ’
-Enable advanced diamond zonal search for 16x16 blocks and half-pixel
-refinement for 16x16 blocks. ‘x1 ’ and ‘log ’ are aliases for
-‘phods ’.
-
-
-‘epzs ’
-Enable all of the things described above, plus advanced diamond zonal
-search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
-estimation on chroma planes.
-
-
-‘full ’
-Enable all of the things described above, plus extended 16x16 and 8x8
-blocks search.
-
-
-
-
-mbd
-Set macroblock decision algorithm. Possible values in the increasing
-order of quality:
-
-
-‘simple ’
-Use macroblock comparing function algorithm (default).
-
-
-‘bits ’
-Enable rate distortion-based half pixel and quarter pixel refinement for
-16x16 blocks.
-
-
-‘rd ’
-Enable all of the things described above, plus rate distortion-based
-half pixel and quarter pixel refinement for 8x8 blocks, and rate
-distortion-based search using square pattern.
-
-
-
-
-lumi_aq
-Enable lumi masking adaptive quantization when set to 1. Default is 0
-(disabled).
-
-
-variance_aq
-Enable variance adaptive quantization when set to 1. Default is 0
-(disabled).
-
-When combined with lumi_aq , the resulting quality will not
-be better than either of the two specified individually. In other
-words, the resulting quality will be the worse of the two
-effects.
-
-
-ssim
-Set structural similarity (SSIM) displaying method. Possible values:
-
-
-‘off ’
-Disable displaying of SSIM information.
-
-
-‘avg ’
-Output average SSIM at the end of encoding to stdout. The format of
-showing the average SSIM is:
-
-
-
-For users who are not familiar with C, %f means a float number, or
-a decimal (e.g. 0.939232).
-
-
-‘frame ’
-Output both per-frame SSIM data during encoding and average SSIM at
-the end of encoding to stdout. The format of per-frame information
-is:
-
-
-
SSIM: avg: %1.3f min: %1.3f max: %1.3f
-
-
-For users who are not familiar with C, %1.3f means a float number
-rounded to 3 digits after the dot (e.g. 0.932).
-
-
-
-
-
-ssim_acc
-Set SSIM accuracy. Valid options are integers within the range of
-0-4, where 0 gives the most accurate result and 4 computes the
-fastest.
-
-
-
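An illustrative invocation (placeholder file names and bitrate) enabling some of the flags described above:

ffmpeg -i input.mp4 -c:v libxvid -b:v 1500k -flags +mv4+aic output.avi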
-
-
-
18.7 mpeg2# TOC
-
-
MPEG-2 video encoder.
-
-
-
18.7.1 Options# TOC
-
-
-seq_disp_ext integer
-Specifies if the encoder should write a sequence_display_extension to the
-output.
-
--1
-auto
-Decide automatically to write it or not (this is the default) by checking if
-the data to be written is different from the default or unspecified values.
-
-0
-never
-Never write it.
-
-1
-always
-Always write it.
-
-
-
-
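A hypothetical example (file names and bitrate are placeholders; assuming the named constant is accepted, otherwise the numeric value 1 can be used) forcing the extension to always be written:

ffmpeg -i input.mov -c:v mpeg2video -b:v 6M -seq_disp_ext always output.m2v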
-
-
-
18.8 png# TOC
-
-
PNG image encoder.
-
-
-
18.8.1 Private options# TOC
-
-
-dpi integer
-Set physical density of pixels, in dots per inch, unset by default
-
-dpm integer
-Set physical density of pixels, in dots per meter, unset by default
-
-
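An illustrative command (placeholder file names) that tags the output with a physical pixel density:

ffmpeg -i input.bmp -c:v png -dpi 300 output.png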
-
-
-
18.9 ProRes# TOC
-
-
Apple ProRes encoder.
-
-
FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
-The encoder to use can be chosen with the -vcodec
option.
-
-
-
18.9.1 Private Options for prores-ks# TOC
-
-
-profile integer
-Select the ProRes profile to encode
-
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-‘4444 ’
-
-
-
-quant_mat integer
-Select quantization matrix.
-
-‘auto ’
-‘default ’
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-
-If set to auto , the matrix matching the profile will be picked.
-If not set, the matrix providing the highest quality, default , will be
-picked.
-
-
-bits_per_mb integer
-How many bits to allot for coding one macroblock. Different profiles use
-between 200 and 2400 bits per macroblock; the maximum is 8000.
-
-
-mbs_per_slice integer
-Number of macroblocks in each slice (1-8); the default value (8)
-should be good in almost all situations.
-
-
-vendor string
-Override the 4-byte vendor ID.
-A custom vendor ID like apl0 would claim the stream was produced by
-the Apple encoder.
-
-
-alpha_bits integer
-Specify number of bits for alpha component.
-Possible values are 0 , 8 and 16 .
-Use 0 to disable alpha plane coding.
-
-
-
-
-
-
18.9.2 Speed considerations# TOC
-
-
In the default mode of operation the encoder has to honor frame constraints
-(i.e. not produce frames with size bigger than requested) while still making
-the output picture as good as possible.
-A frame containing a lot of small details is harder to compress and the encoder
-would spend more time searching for appropriate quantizers for each slice.
-
-
Setting a higher bits_per_mb limit will improve the speed.
-
-
For the fastest encoding speed set the qscale parameter (4 is the
-recommended value) and do not set a size constraint.
-
-
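As an illustration (not from the original text; file names are placeholders), a typical prores-ks invocation selecting the HQ profile:

ffmpeg -i input.mov -c:v prores_ks -profile:v hq -vendor apl0 output.mov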
-
-
19 Subtitles Encoders# TOC
-
-
-
19.1 dvdsub# TOC
-
-
This codec encodes the bitmap subtitle format that is used in DVDs.
-Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
-and they can also be used in Matroska files.
-
-
-
19.1.1 Options# TOC
-
-
-even_rows_fix
-When set to 1, enable a work-around that makes the number of pixel rows
-even in all subtitles. This fixes a problem with some players that
-cut off the bottom row if the number is odd. The work-around just adds
-a fully transparent row if needed. The overhead is low, typically
-one byte per subtitle on average.
-
-By default, this work-around is disabled.
-
-
-
-
-
20 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs
.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs
, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF
, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF
.
-
-
The option -bsfs
of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
20.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
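For example (an illustrative command with placeholder file names), remuxing an ADTS AAC stream into MP4:

ffmpeg -i input.ts -c copy -bsf:a aac_adtstoasc output.mp4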
-
-
20.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
20.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified it is assumed ‘k ’.
-
-
For example the following ffmpeg
command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264
encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
20.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg
, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
20.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
20.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
20.7 mjpega_dump_header# TOC
-
-
-
20.8 movsub# TOC
-
-
-
20.9 mp3_header_decompress# TOC
-
-
-
20.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeric string whose value is related to how often output bytes are
-modified. Values less than or equal to 0 are forbidden; the lower the
-value, the more frequently bytes are modified, with 1 meaning that
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
20.11 remove_extra# TOC
-
-
-
21 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value allows more information to be
-detected in case it is dispersed into the stream, but will increase
-latency. Must be an integer not less than 32. It is 5000000 by default.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value allows more accurate information to be detected, but will
-increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non compliancies as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitles desynching and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set number of bytes to skip before reading the header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
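As a combined illustration of some of the options above (an assumed command, not from the original text), generating missing PTS while raising the probing limits on a troublesome input:

ffmpeg -fflags +genpts -probesize 10000000 -analyzeduration 10000000 -i input.ts -c copy output.mkv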
-
-
-
-
21.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
-
-
22 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
22.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
-
22.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers except the PNG signature, up to (but not including) the first
-fcTL chunk, are transmitted as extradata.
-Each frame is then made of all the chunks between two fcTL chunks, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
-
-
22.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
22.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
22.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was at its default value of -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the string with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
-
-
22.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
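To illustrate (an assumed script and command, not from the original text; file names are placeholders), a minimal ffconcat script list.txt might contain:

ffconcat version 1.0
file part1.mp4
file part2.mp4

and could then be concatenated losslessly with:

ffmpeg -f concat -i list.txt -c copy output.mp4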
-
-
-
22.5 flv# TOC
-
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
22.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-tracks meta data entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
-
-
22.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
22.8 gif# TOC
-
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
22.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%0Nd" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not provided: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
22.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
22.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
-
-
-
22.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay, assuming a pixel format of rgb24, a video
-size of 320x240, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
22.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
22.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
23 Muxers# TOC
-
-
Muxers are configured elements in FFmpeg which allow writing
-multimedia streams to a particular type of file.
-
-
When you configure your FFmpeg build, all the supported muxers
-are enabled by default. You can list all available muxers using the
-configure option --list-muxers.
-
-
You can disable all the muxers with the configure option
---disable-muxers and selectively enable / disable single muxers
-with the options --enable-muxer=MUXER /
---disable-muxer=MUXER.
-
-
The option -formats of the ff* tools will display the list of
-enabled muxers.
-
-
A description of some of the currently available muxers follows.
-
-
-
23.1 aiff# TOC
-
-
Audio Interchange File Format muxer.
-
-
-
23.1.1 Options# TOC
-
-
It accepts the following options:
-
-
-write_id3v2
-Enable writing ID3v2 tags when set to 1. Default is 0 (disabled).
-
-
-id3v2_version
-Select ID3v2 version to write. Currently only version 3 and 4 (aka.
-ID3v2.3 and ID3v2.4) are supported. The default is version 4.
-
-
-
-
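-
-For illustration (not from the original text), writing an AIFF file with an
-ID3v2.3 tag could look like the following; input.wav is a hypothetical input:
-
-
ffmpeg -i input.wav -write_id3v2 1 -id3v2_version 3 out.aiff
-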
-
-
23.2 crc# TOC
-
-
CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a single line of the form:
-CRC=0xCRC , where CRC is a hexadecimal number 0-padded to
-8 digits containing the CRC for all the decoded input frames.
-
-
See also the framecrc muxer.
-
-
-
23.2.1 Examples# TOC
-
-
For example to compute the CRC of the input, and store it in the file
-out.crc :
-
-
ffmpeg -i INPUT -f crc out.crc
-
-
-
You can print the CRC to stdout with the command:
-
-
ffmpeg -i INPUT -f crc -
-
-
-
You can select the output format of each frame with ffmpeg by
-specifying the audio and video codec and format. For example to
-compute the CRC of the input audio converted to PCM unsigned 8-bit
-and the input video converted to MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
-
-
-
-
23.3 framecrc# TOC
-
-
Per-packet CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , 0xCRC
-
-
-
CRC is a hexadecimal number 0-padded to 8 digits containing the
-CRC of the packet.
-
-
-
23.3.1 Examples# TOC
-
-
For example to compute the CRC of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.crc :
-
-
ffmpeg -i INPUT -f framecrc out.crc
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framecrc -
-
-
-
With ffmpeg, you can select the output format to which the
-audio and video frames are encoded before computing the CRC for each
-packet by specifying the audio and video codec. For example, to
-compute the CRC of each decoded input audio frame converted to PCM
-unsigned 8-bit and of each decoded input video frame converted to
-MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
-
-
-
See also the crc muxer.
-
-
-
23.4 framemd5# TOC
-
-
Per-packet MD5 testing format.
-
-
This muxer computes and prints the MD5 hash for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , MD5
-
-
-
MD5 is a hexadecimal number representing the computed MD5 hash
-for the packet.
-
-
-
23.4.1 Examples# TOC
-
-
For example to compute the MD5 of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.md5 :
-
-
ffmpeg -i INPUT -f framemd5 out.md5
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framemd5 -
-
-
-
See also the md5 muxer.
-
-
-
23.5 gif# TOC
-
-
Animated GIF muxer.
-
-
It accepts the following options:
-
-
-loop
-Set the number of times to loop the output. Use -1 for no loop, 0
-for looping indefinitely (default).
-
-
-final_delay
-Force the delay (expressed in centiseconds) after the last frame. Each frame
-ends with a delay until the next frame. The default is -1, which is a
-special value to tell the muxer to re-use the previous delay. In case of a
-loop, you might want to customize this value to mark a pause for instance.
-
-
-
-
For example, to encode a gif looping 10 times, with a 5 seconds delay between
-the loops:
-
-
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
-
-
-
Note 1: if you wish to extract the frames in separate GIF files, you need to
-force the image2 muxer:
-
-
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
-
-
-
Note 2: the GIF format has a very small time base: the delay between two frames
-cannot be smaller than one centisecond.
-
-
-
23.6 hls# TOC
-
-
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
-the HTTP Live Streaming (HLS) specification.
-
-
It creates a playlist file, and one or more segment files. The output filename
-specifies the playlist filename.
-
-
By default, the muxer creates a file for each segment produced. These files
-have the same name as the playlist, followed by a sequential number and a
-.ts extension.
-
-
For example, to convert an input file with ffmpeg:
-
-
ffmpeg -i in.nut out.m3u8
-
-
This example will produce the playlist, out.m3u8 , and segment files:
-out0.ts , out1.ts , out2.ts , etc.
-
-
See also the segment muxer, which provides a more generic and
-flexible implementation of a segmenter, and can be used to perform HLS
-segmentation.
-
-
-
23.6.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-hls_time seconds
-Set the segment length in seconds. Default value is 2.
-
-
-hls_list_size size
-Set the maximum number of playlist entries. If set to 0 the list file
-will contain all the segments. Default value is 5.
-
-
-hls_ts_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing the : special character must be
-escaped.
-
-
-hls_wrap wrap
-Set the number after which the segment filename number (the number
-specified in each segment file) wraps. If set to 0 the number will be
-never wrapped. Default value is 0.
-
-This option is useful to avoid filling the disk with many segment
-files, and limits the maximum number of segment files written to disk
-to wrap .
-
-
-start_number number
-Start the playlist sequence number from number . Default value is
-0.
-
-
-hls_allow_cache allowcache
-Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
-
-
-hls_base_url baseurl
-Append baseurl to every entry in the playlist.
-Useful to generate playlists with absolute paths.
-
-Note that the playlist sequence number must be unique for each segment
-and it is not to be confused with the segment filename sequence number
-which can be cyclic, for example if the wrap option is
-specified.
-
-
-hls_segment_filename filename
-Set the segment filename. Unless hls_flags single_file is set, filename
-is used as a string format with the segment number:
-
-
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
-
-This example will produce the playlist, out.m3u8 , and segment files:
-file000.ts , file001.ts , file002.ts , etc.
-
-
-hls_flags single_file
-If this flag is set, the muxer will store all segments in a single MPEG-TS
-file, and will use byte ranges in the playlist. HLS playlists generated
-this way will have the version number 4.
-For example:
-
-
ffmpeg -i in.nut -hls_flags single_file out.m3u8
-
-Will produce the playlist, out.m3u8 , and a single segment file,
-out.ts .
-
-
-hls_flags delete_segments
-Segment files removed from the playlist are deleted after a period of time
-equal to the duration of the segment plus the duration of the playlist.
-
-
-
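-
-As a hedged illustration combining the options above (not one of the original
-examples), assuming a hypothetical input in.mp4:
-
-
ffmpeg -i in.mp4 -c copy -hls_time 4 -hls_list_size 10 -hls_segment_filename 'seg%03d.ts' out.m3u8
-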
-
-
23.7 ico# TOC
-
-
ICO file muxer.
-
-
Microsoft’s icon file format (ICO) has some strict limitations that should be noted:
-
-
- Size cannot exceed 256 pixels in any dimension
-
- Only BMP and PNG images can be stored
-
- If a BMP image is used, it must be one of the following pixel formats:
-
-
BMP Bit Depth    FFmpeg Pixel Format
-1bit             pal8
-4bit             pal8
-8bit             pal8
-16bit            rgb555le
-24bit            bgr24
-32bit            bgra
-
-
- If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
-
- If a PNG image is used, it must use the rgba pixel format
-
-
-
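-
-As an illustrative sketch (not part of the original text), the constraints
-above can be met by storing a PNG image scaled to 256x256 in rgba; the input
-name logo.png is hypothetical:
-
-
ffmpeg -i logo.png -vf scale=256:256 -c:v png -pix_fmt rgba favicon.ico
-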
-
23.8 image2# TOC
-
-
Image file muxer.
-
-
The image file muxer writes video frames to image files.
-
-
The output filenames are specified by a pattern, which can be used to
-produce sequentially numbered series of files.
-The pattern may contain the string "%d" or "%0Nd", this string
-specifies the position of the characters representing a numbering in
-the filenames. If the form "%0Nd" is used, the string
-representing the number in each filename is 0-padded to N
-digits. The literal character ’%’ can be specified in the pattern with
-the string "%%".
-
-
If the pattern contains "%d" or "%0Nd", the first filename of
-the file list specified will contain the number 1, all the following
-numbers will be sequential.
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the image files to write.
-
-
For example the pattern "img-%03d.bmp" will specify a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.
-The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
-form img%-1.jpg , img%-2.jpg , ..., img%-10.jpg ,
-etc.
-
-
-
23.8.1 Examples# TOC
-
-
The following example shows how to use ffmpeg
for creating a
-sequence of files img-001.jpeg , img-002.jpeg , ...,
-taking one image every second from the input video:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
-
-
-
Note that with ffmpeg
, if the format is not specified with the
--f
option and the output filename specifies an image file
-format, the image2 muxer is automatically selected, so the previous
-command can be written as:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
-
-
-
Note also that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to create a single image file
-img.jpeg from the input video you can employ the command:
-
-
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
-
-
-
The strftime option allows you to expand the filename with
-date and time information. Check the documentation of
-the strftime()
function for the syntax.
-
-
For example to generate image files from the strftime()
-"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg
command
-can be used:
-
-
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
-
-
-
-
23.8.2 Options# TOC
-
-
-start_number
-Start the sequence from the specified number. Default value is 1. Must
-be a non-negative number.
-
-
-update
-If set to 1, the filename will always be interpreted as just a
-filename, not a pattern, and the corresponding file will be continuously
-overwritten with new images. Default value is 0.
-
-
-strftime
-If set to 1, expand the filename with date and time information from
-strftime(). Default value is 0.
-
-
-
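-
-As a small illustration of the update option (not from the original
-examples), continuously overwriting a single output image; INPUT and
-latest.jpg are placeholders:
-
-
ffmpeg -i INPUT -update 1 latest.jpg
-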
-
The image muxer supports the .Y.U.V image file format. This format is
-special in that each image frame consists of three files, one for
-each of the YUV420P components. To read or write this image file format,
-specify the name of the ’.Y’ file. The muxer will automatically open the
-’.U’ and ’.V’ files as required.
-
-
-
23.9 matroska# TOC
-
-
Matroska container muxer.
-
-
This muxer implements the matroska and webm container specs.
-
-
-
23.9.1 Metadata# TOC
-
-
The recognized metadata settings in this muxer are:
-
-
-title
-Set title name provided to a single track.
-
-
-language
-Specify the language of the track in the Matroska languages form.
-
-The language can be either the 3 letters bibliographic ISO-639-2 (ISO
-639-2/B) form (like "fre" for French), or a language code mixed with a
-country code for specialities in languages (like "fre-ca" for Canadian
-French).
-
-
-stereo_mode
-Set stereo 3D video layout of two views in a single video track.
-
-The following values are recognized:
-
-‘mono ’
-video is not stereo
-
-‘left_right ’
-Both views are arranged side by side, Left-eye view is on the left
-
-‘bottom_top ’
-Both views are arranged in top-bottom orientation, Left-eye view is at bottom
-
-‘top_bottom ’
-Both views are arranged in top-bottom orientation, Left-eye view is on top
-
-‘checkerboard_rl ’
-Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first
-
-‘checkerboard_lr ’
-Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first
-
-‘row_interleaved_rl ’
-Each view is constituted by a row based interleaving, Right-eye view is first row
-
-‘row_interleaved_lr ’
-Each view is constituted by a row based interleaving, Left-eye view is first row
-
-‘col_interleaved_rl ’
-Both views are arranged in a column based interleaving manner, Right-eye view is first column
-
-‘col_interleaved_lr ’
-Both views are arranged in a column based interleaving manner, Left-eye view is first column
-
-‘anaglyph_cyan_red ’
-All frames are in anaglyph format viewable through red-cyan filters
-
-‘right_left ’
-Both views are arranged side by side, Right-eye view is on the left
-
-‘anaglyph_green_magenta ’
-All frames are in anaglyph format viewable through green-magenta filters
-
-‘block_lr ’
-Both eyes laced in one Block, Left-eye view is first
-
-‘block_rl ’
-Both eyes laced in one Block, Right-eye view is first
-
-
-
-
-
-
For example a 3D WebM clip can be created using the following command line:
-
-
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
-
-
-
-
23.9.2 Options# TOC
-
-
This muxer supports the following options:
-
-
-reserve_index_space
-By default, this muxer writes the index for seeking (called cues in Matroska
-terms) at the end of the file, because it cannot know in advance how much space
-to leave for the index at the beginning of the file. However for some use cases
-– e.g. streaming where seeking is possible but slow – it is useful to put the
-index at the beginning of the file.
-
-If this option is set to a non-zero value, the muxer will reserve a given amount
-of space in the file header and then try to write the cues there when the muxing
-finishes. If the available space does not suffice, muxing will fail. A safe size
-for most use cases should be about 50kB per hour of video.
-
-Note that cues are only written if the output is seekable and this option will
-have no effect if it is not.
-
-
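-
-As a hedged example (not in the original text), reserving roughly 50kB of cue
-space for about one hour of video, following the guideline above; in.mp4 is a
-hypothetical input:
-
-
ffmpeg -i in.mp4 -c copy -reserve_index_space 50000 out.mkv
-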
-
-
-
23.10 md5# TOC
-
-
MD5 testing format.
-
-
This muxer computes and prints the MD5 hash of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a single line of the form:
-MD5=MD5 , where MD5 is a hexadecimal number representing
-the computed MD5 hash.
-
-
For example to compute the MD5 hash of the input converted to raw
-audio and video, and store it in the file out.md5 :
-
-
ffmpeg -i INPUT -f md5 out.md5
-
-
-
You can print the MD5 to stdout with the command:
-
-
ffmpeg -i INPUT -f md5 -
-
-
-
See also the framemd5 muxer.
-
-
-
23.11 mov, mp4, ismv# TOC
-
-
MOV/MP4/ISMV (Smooth Streaming) muxer.
-
-
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
-file has all the metadata about all packets stored in one location
-(written at the end of the file, it can be moved to the start for
-better playback by adding faststart to the movflags , or
-using the qt-faststart tool). A fragmented
-file consists of a number of fragments, where packets and metadata
-about these packets are stored together. Writing a fragmented
-file has the advantage that the file is decodable even if the
-writing is interrupted (while a normal MOV/MP4 is undecodable if
-it is not properly finished), and it requires less memory when writing
-very long files (since writing normal MOV/MP4 files stores info about
-every single packet in memory until the file is closed). The downside
-is that it is less compatible with other applications.
-
-
-
23.11.1 Options# TOC
-
-
Fragmentation is enabled by setting one of the AVOptions that define
-how to cut the file into fragments:
-
-
--moov_size bytes
-Reserves space for the moov atom at the beginning of the file instead of placing the
-moov atom at the end. If the space reserved is insufficient, muxing will fail.
-
--movflags frag_keyframe
-Start a new fragment at each video keyframe.
-
--frag_duration duration
-Create fragments that are duration microseconds long.
-
--frag_size size
-Create fragments that contain up to size bytes of payload data.
-
--movflags frag_custom
-Allow the caller to manually choose when to cut fragments, by
-calling av_write_frame(ctx, NULL) to write a fragment with
-the packets written so far. (This is only useful with other
-applications integrating libavformat, not from ffmpeg.)
-
--min_frag_duration duration
-Don’t create fragments that are shorter than duration microseconds long.
-
-
-
-
If more than one condition is specified, fragments are cut when
-one of the specified conditions is fulfilled. The exception to this is
--min_frag_duration, which has to be fulfilled for any of the other
-conditions to apply.
-
-
Additionally, the way the output file is written can be adjusted
-through a few other options:
-
-
--movflags empty_moov
-Write an initial moov atom directly at the start of the file, without
-describing any samples in it. Generally, an mdat/moov pair is written
-at the start of the file, as a normal MOV/MP4 file, containing only
-a short portion of the file. With this option set, there is no initial
-mdat atom, and the moov atom only describes the tracks but has
-a zero duration.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags separate_moof
-Write a separate moof (movie fragment) atom for each track. Normally,
-packets for all tracks are written in a moof atom (which is slightly
-more efficient), but with this option set, the muxer writes one moof/mdat
-pair for each track, making it easier to separate tracks.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags faststart
-Run a second pass moving the index (moov atom) to the beginning of the file.
-This operation can take a while, and will not work in various situations such
-as fragmented output, thus it is not enabled by default.
-
--movflags rtphint
-Add RTP hinting tracks to the output file.
-
--movflags disable_chpl
-Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
-and a QuickTime chapter track are written to the file. With this option
-set, only the QuickTime chapter track will be written. Nero chapters can
-cause failures when the file is reprocessed with certain tagging programs, like
-mp3Tag 2.61a and iTunes 11.3; most likely other versions are affected as well.
-
--movflags omit_tfhd_offset
-Do not write any absolute base_data_offset in tfhd atoms. This avoids
-tying fragments to absolute byte positions in the file/streams.
-
--movflags default_base_moof
-Similarly to the omit_tfhd_offset, this flag avoids writing the
-absolute base_data_offset field in tfhd atoms, but does so by using
-the new default-base-is-moof flag instead. This flag is new from
-14496-12:2012. This may make the fragments easier to parse in certain
-circumstances (avoiding basing track fragment location calculations
-on the implicit end of the previous track fragment).
-
-
-
-
-
23.11.2 Example# TOC
-
-
Smooth Streaming content can be pushed in real time to a publishing
-point on IIS with this muxer. Example:
-
-
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
-
-
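-
-As an additional hedged sketch (not from the original text), writing a
-fragmented MP4 that cuts a fragment at every keyframe and starts with an empty
-moov atom; in.mp4 is a hypothetical input:
-
-
ffmpeg -i in.mp4 -c copy -movflags frag_keyframe+empty_moov fragmented.mp4
-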
-
-
23.12 mp3# TOC
-
-
The MP3 muxer writes a raw MP3 stream with the following optional features:
-
- An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
-2.4 are supported, the id3v2_version private option controls which one is
-used (3 or 4). Setting id3v2_version to 0 disables the ID3v2 header
-completely.
-
-The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
-The pictures are supplied to the muxer in the form of a video stream with a single
-packet. There can be any number of those streams, each will correspond to a
-single APIC frame. The stream metadata tags title and comment map
-to APIC description and picture type respectively. See
-http://id3.org/id3v2.4.0-frames for allowed picture types.
-
-Note that the APIC frames must be written at the beginning, so the muxer will
-buffer the audio frames until it gets all the pictures. It is therefore advised
-to provide the pictures as soon as possible to avoid excessive buffering.
-
- A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
-default, but will be written only if the output is seekable. The
-write_xing private option can be used to disable it. The frame contains
-various information that may be useful to the decoder, like the audio duration
-or encoder delay.
-
- A legacy ID3v1 tag at the end of the file (disabled by default). It may be
-enabled with the write_id3v1 private option, but as its capabilities are
-very limited, its usage is not recommended.
-
-
-
Examples:
-
-
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
-
-
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
-
-
-
To attach a picture to an mp3 file, select both the audio and the picture stream
-with map:
-
-
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
--metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
-
-
-
Write a "clean" MP3 without any extra features:
-
-
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
-
-
-
-
23.13 mpegts# TOC
-
-
MPEG transport stream muxer.
-
-
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
-
-
The recognized metadata settings in the mpegts muxer are service_provider
-and service_name. If they are not set, the default for
-service_provider is "FFmpeg" and the default for
-service_name is "Service01".
-
-
-
23.13.1 Options# TOC
-
-
The muxer options are:
-
-
--mpegts_original_network_id number
-Set the original_network_id (default 0x0001). This is the unique identifier
-of a network in DVB. Its main use is in the unique identification of a
-service through the path Original_Network_ID, Transport_Stream_ID.
-
--mpegts_transport_stream_id number
-Set the transport_stream_id (default 0x0001). This identifies a
-transponder in DVB.
-
--mpegts_service_id number
-Set the service_id (default 0x0001) also known as program in DVB.
-
--mpegts_pmt_start_pid number
-Set the first PID for PMT (default 0x1000, max 0x1f00).
-
--mpegts_start_pid number
-Set the first PID for data packets (default 0x0100, max 0x0f00).
-
--mpegts_m2ts_mode number
-Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
-
--muxrate number
-Set a constant muxrate (default VBR).
-
--pcr_period number
-Override the default PCR retransmission time (default 20ms), ignored
-if variable muxrate is selected.
-
--pes_payload_size number
-Set minimum PES packet payload in bytes.
-
--mpegts_flags flags
-Set flags (see below).
-
--mpegts_copyts number
-Preserve original timestamps, if value is set to 1. Default value is -1, which
-results in shifting timestamps so that they start from 0.
-
--tables_version number
-Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively).
-This option allows updating the stream structure so that a standard consumer may
-detect the change. To do so, reopen the output AVFormatContext (in case of API
-usage) or restart the ffmpeg instance, cyclically changing the tables_version value:
-
-
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
-ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-
-
-
-
-
Option mpegts_flags may take a set of such flags:
-
-
-resend_headers
-Reemit PAT/PMT before writing the next packet.
-
-latm
-Use LATM packetization for AAC.
-
-
-
-
-
23.13.2 Example# TOC
-
-
-
ffmpeg -i file.mpg -c copy \
- -mpegts_original_network_id 0x1122 \
- -mpegts_transport_stream_id 0x3344 \
- -mpegts_service_id 0x5566 \
- -mpegts_pmt_start_pid 0x1500 \
- -mpegts_start_pid 0x150 \
- -metadata service_provider="Some provider" \
- -metadata service_name="Some Channel" \
- -y out.ts
-
-
-
-
23.14 null# TOC
-
-
Null muxer.
-
-
This muxer does not generate any output file, it is mainly useful for
-testing or benchmarking purposes.
-
-
For example to benchmark decoding with ffmpeg you can use the
-command:
-
-
ffmpeg -benchmark -i INPUT -f null out.null
-
-
-
Note that the above command does not read or write the out.null
-file, but specifying the output file is required by the ffmpeg
-syntax.
-
-
Alternatively you can write the command as:
-
-
ffmpeg -benchmark -i INPUT -f null -
-
-
-
-
23.15 nut# TOC
-
-
--syncpoints flags
-Change the syncpoint usage in nut:
-
-default use the normal low-overhead seeking aids.
-none do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
-Use of this option is not recommended, as the resulting files are very damage
- sensitive and seeking is not possible. Also in general the overhead from
- syncpoints is negligible. Note, -write_index 0 can be used to disable
- all growing data tables, allowing muxing of endless streams with limited memory
- and without these disadvantages.
-
-timestamped extend the syncpoint with a wallclock field.
-
-The none and timestamped flags are experimental.
-
--write_index bool
-Write index at the end, the default is to write an index.
-
-
-
-
-
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
-
-
-
-
23.16 ogg# TOC
-
-
Ogg container muxer.
-
-
--page_duration duration
-Preferred page duration, in microseconds. The muxer will attempt to create
-pages that are approximately duration microseconds long. This allows the
-user to compromise between seek granularity and container overhead. The default
-is 1 second. A value of 0 will fill all segments, making pages as large as
-possible. A value of 1 will effectively use 1 packet-per-page in most
-situations, giving a small seek granularity at the cost of additional container
-overhead.
-
-
-
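-
-For illustration (not from the original text), trading some container overhead
-for finer seek granularity with 100 ms pages; in.flac is a hypothetical input:
-
-
ffmpeg -i in.flac -c:a libvorbis -page_duration 100000 out.ogg
-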
-
-
23.17 segment, stream_segment, ssegment# TOC
-
-
Basic stream segmenter.
-
-
This muxer outputs streams to a number of separate files of nearly
-fixed duration. Output filename pattern can be set in a fashion similar to
-image2 .
-
-
stream_segment is a variant of the muxer used to write to
-streaming output formats, i.e. which do not require global headers,
-and is recommended for outputting e.g. to MPEG transport stream segments.
-ssegment is a shorter alias for stream_segment.
-
-
Every segment starts with a keyframe of the selected reference stream,
-which is set through the reference_stream option.
-
-
Note that if you want accurate splitting for a video file, you need to
-make the input key frames correspond to the exact splitting times
-expected by the segmenter, or the segment muxer will start the new
-segment with the key frame found next after the specified start
-time.
-
-
The segment muxer works best with a single constant frame rate video.
-
-
Optionally it can generate a list of the created segments, by setting
-the option segment_list . The list type is specified by the
-segment_list_type option. The entry filenames in the segment
-list are set by default to the basename of the corresponding segment
-files.
-
-
See also the hls muxer, which provides a more specific
-implementation for HLS segmentation.
-
-
-
23.17.1 Options# TOC
-
-
The segment muxer supports the following options:
-
-
-reference_stream specifier
-Set the reference stream, as specified by the string specifier .
-If specifier is set to auto, the reference is chosen
-automatically. Otherwise it must be a stream specifier (see the “Stream
-specifiers” chapter in the ffmpeg manual) which specifies the
-reference stream. The default value is auto.
-
-
-segment_format format
-Override the inner container format, by default it is guessed by the filename
-extension.
-
-
-segment_format_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing the : special character must be
-escaped.
-
-
-segment_list name
-Generate also a listfile named name . If not specified no
-listfile is generated.
-
-
-segment_list_flags flags
-Set flags affecting the segment list generation.
-
-It currently supports the following flags:
-
-‘cache ’
-Allow caching (only affects M3U8 list files).
-
-
-‘live ’
-Allow live-friendly file generation.
-
-
-
-
-segment_list_type type
-Select the listing format.
-
-flat use a simple flat list of entries.
-hls use an m3u8-like structure.
-
-
-
-segment_list_size size
-Update the list file so that it contains at most size
-segments. If 0 the list file will contain all the segments. Default
-value is 0.
-
-
-segment_list_entry_prefix prefix
-Prepend prefix to each entry. Useful to generate absolute paths.
-By default no prefix is applied.
-
-The following values are recognized:
-
-‘flat ’
-Generate a flat list for the created segments, one segment per line.
-
-
-‘csv, ext ’
-Generate a list for the created segments, one segment per line,
-each line matching the format (comma-separated values):
-
-
segment_filename ,segment_start_time ,segment_end_time
-
-
-segment_filename is the name of the output file generated by the
-muxer according to the provided pattern. CSV escaping (according to
-RFC4180) is applied if required.
-
-segment_start_time and segment_end_time specify
-the segment start and end time expressed in seconds.
-
-A list file with the suffix ".csv"
or ".ext"
will
-auto-select this format.
-
-‘ext ’ is deprecated in favor or ‘csv ’.
-
-
-‘ffconcat ’
-Generate an ffconcat file for the created segments. The resulting file
-can be read using the FFmpeg concat demuxer.
-
-A list file with the suffix ".ffcat"
or ".ffconcat"
will
-auto-select this format.
-
-
-‘m3u8 ’
-Generate an extended M3U8 file, version 3, compliant with
-http://tools.ietf.org/id/draft-pantos-http-live-streaming .
-
-A list file with the suffix ".m3u8"
will auto-select this format.
-
-
-
-If not specified the type is guessed from the list file name suffix.
-
-
-segment_time time
-Set segment duration to time , the value must be a duration
-specification. Default value is "2". See also the
-segment_times option.
-
-Note that splitting may not be accurate, unless you force the
-reference stream key-frames at the given time. See the introductory
-notice and the examples below.
-
-
-segment_atclocktime 1|0
-If set to "1" split at regular clock time intervals starting from 00:00
-o’clock. The time value specified in segment_time is
-used for setting the length of the splitting interval.
-
-For example with segment_time set to "900" this makes it possible
-to create files at 12:00 o’clock, 12:15, 12:30, etc.
-
-Default value is "0".
-
-
-segment_time_delta delta
-Specify the accuracy time when selecting the start time for a
-segment, expressed as a duration specification. Default value is "0".
-
-When delta is specified a key-frame will start a new segment if its
-PTS satisfies the relation:
-
-
PTS >= start_time - time_delta
-
-
-This option is useful when splitting video content, which is always
-split at GOP boundaries, in case a key frame is found just before the
-specified split time.
-
-In particular, it may be used in combination with the ffmpeg option
-force_key_frames . The key frame times specified by
-force_key_frames may not be set accurately because of rounding
-issues, with the consequence that a key frame time may end up set just
-before the specified time. For constant frame rate videos a value of
-1/(2*frame_rate ) should address the worst case mismatch between
-the specified time and the time set by force_key_frames .
-
-
-segment_times times
-Specify a list of split points. times contains a list of comma
-separated duration specifications, in increasing order. See also
-the segment_time option.
-
-
-segment_frames frames
-Specify a list of split video frame numbers. frames contains a
-list of comma separated integer numbers, in increasing order.
-
-This option specifies to start a new segment whenever a reference
-stream key frame is found and the sequential number (starting from 0)
-of the frame is greater or equal to the next value in the list.
-
-
-segment_wrap limit
-Wrap around segment index once it reaches limit .
-
-
-segment_start_number number
-Set the sequence number of the first segment. Defaults to 0.
-
-
-reset_timestamps 1|0
-Reset timestamps at the beginning of each segment, so that each segment
-will start with near-zero timestamps. It is meant to ease the playback
-of the generated segments. May not work with some combinations of
-muxers/codecs. It is set to 0 by default.
-
-
-initial_offset offset
-Specify timestamp offset to apply to the output packet timestamps. The
-argument must be a time duration specification, and defaults to 0.
-
-
-
-
-
23.17.2 Examples# TOC
-
-
-
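-A hedged illustration (not from the original manual): remux in.mkv into
-roughly 10-second MPEG-TS segments and generate an M3U8 list; all file names
-are hypothetical:
-
-
ffmpeg -i in.mkv -c copy -map 0 -f segment -segment_time 10 -segment_list out.m3u8 -segment_list_type m3u8 'out%03d.ts'
-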
-
-
23.18 smoothstreaming# TOC
-
-
The Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with a conventional web server.
-
-
-window_size
-Specify the number of fragments kept in the manifest. Default 0 (keep all).
-
-
-extra_window_size
-Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
-
-
-lookahead_count
-Specify the number of lookahead fragments. Default 2.
-
-
-min_frag_duration
-Specify the minimum fragment duration (in microseconds). Default 5000000.
-
-
-remove_at_exit
-Specify whether to remove all fragments when finished. Default 0 (do not remove).
-
-
-
-
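-
-A hedged sketch (not from the original text) of writing Smooth Streaming
-output to a local directory; in.ismv and the output path are hypothetical and
-the input is assumed to already use compatible codecs:
-
-
ffmpeg -i in.ismv -c copy -f smoothstreaming -window_size 10 -lookahead_count 2 /var/www/stream
-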
-
-
23.19 tee# TOC
-
-
The tee muxer can be used to write the same data to several files or any
-other kind of muxer. It can be used, for example, to both stream a video to
-the network and save it to disk at the same time.
-
-
It is different from specifying several outputs to the ffmpeg
-command-line tool because the audio and video data will be encoded only once
-with the tee muxer; encoding can be a very expensive process. It is not
-useful when using the libavformat API directly because it is then possible
-to feed the same packets to several muxers directly.
-
-
The slave outputs are specified in the file name given to the muxer,
-separated by ’|’. If any of the slave names contains the ’|’ separator,
-leading or trailing spaces or any special character, it must be
-escaped (see (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual ).
-
-
Muxer options can be specified for each slave by prepending them as a list of
-key =value pairs separated by ’:’, between square brackets. If
-the options values contain a special character or the ’:’ separator, they
-must be escaped; note that this is a second level escaping.
-
-
The following special options are also recognized:
-
-f
-Specify the format name. Useful if it cannot be guessed from the
-output name suffix.
-
-
-bsfs[/spec ]
-Specify a list of bitstream filters to apply to the specified
-output.
-
-It is possible to specify to which streams a given bitstream filter
-applies, by appending a stream specifier to the option separated by
-/. spec must be a stream specifier (see Format stream specifiers). If the stream specifier is not specified, the
-bitstream filters will be applied to all streams in the output.
-
-Several bitstream filters can be specified, separated by ",".
-
-
-select
-Select the streams that should be mapped to the slave output,
-specified by a stream specifier. If not specified, this defaults to
-all the input streams.
-
-
-
-
-
23.19.1 Examples# TOC
-
-
- Encode something and both archive it in a WebM file and stream it
-as MPEG-TS over UDP (the streams need to be explicitly mapped):
-
-
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
- "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
-
-
- Use ffmpeg to encode the input, and send the output
-to three different destinations. The dump_extra bitstream
-filter is used to add extradata information to all the output video
-keyframes packets, as requested by the MPEG-TS format. The select
-option is applied to out.aac in order to make it contain only
-audio packets.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
-
-
- As above, but select only stream a:1 for the audio output. Note
-that a second level escaping must be performed, as ":" is a special
-character used to separate options.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
-
-
-
-
Note: some codecs may need different options depending on the output format;
-the auto-detection of this cannot work with the tee muxer. The main example
-is the global_header flag.
-
-
-
23.20 webm_dash_manifest# TOC
-
-
WebM DASH Manifest muxer.
-
-
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
-
-
-
23.20.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-adaptation_sets
-This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
-unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
-audio and video streams. Any number of adaptation sets can be added using this option.
-
-
-
-
-
23.20.2 Example# TOC
-
-
ffmpeg -f webm_dash_manifest -i video1.webm \
- -f webm_dash_manifest -i video2.webm \
- -f webm_dash_manifest -i audio1.webm \
- -f webm_dash_manifest -i audio2.webm \
- -map 0 -map 1 -map 2 -map 3 \
- -c copy \
- -f webm_dash_manifest \
- -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
- manifest.xml
-
-
-
-
24 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
An ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
25 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "–list-protocols".
-
-
You can disable all the protocols using the configure option
-"–disable-protocols", and selectively enable a protocol using the
-option "–enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"–disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
25.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
-
bluray:/mnt/bluray
-
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
25.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to a temporary file. It brings seeking capability to live streams.
-
-
-
-
-
25.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
Allows reading and seeking from many resources in sequence as if they were
-a unique resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
where URL1, URL2, ..., URLN are the URLs of the
-resources to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg,
-split2.mpeg, split3.mpeg with ffplay use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
25.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
-
-
25.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
25.6 file# TOC
-
-
File access protocol.
-
-
Allows reading from or writing to a file.
-
-
A file URL can have the form:
-
-
file:filename
-
where filename is the path of the file to read.
-
-
A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow media.
-
-
-
-
-
25.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
Allows reading from or writing to remote resources using the FTP protocol.
-
-
The following syntax is required.
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
NOTE: The protocol can be used as output, but it is recommended not to do
-so, unless special care is taken (tests, customized server configuration
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
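-
-A hedged example (not from the original text) of reading a remote file over
-FTP; server, credentials and paths are placeholders:
-
-
ffmpeg -i ftp://user:password@server/path/to/remote/resource.mpeg -c copy local_copy.mpeg
-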
-
-
25.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
25.9 hls# TOC
-
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
25.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
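-
-As an illustration of the options above (not from the original text),
-overriding the User-Agent and disabling ICY metadata while reading an HTTP
-resource; the URL is a placeholder:
-
-
ffmpeg -user_agent "MyPlayer/1.0" -icy 0 -i http://example.com/stream.ts -c copy out.ts
-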
-
-
25.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
25.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, which do not support the
-HTTP PUT method but only the SOURCE method.
-
-
-
-
-
-
icecast://[username [:password ]@]server :port /mountpoint
-
-
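-
-A hedged sketch (not from the original text) of streaming MP3 audio to an
-Icecast mountpoint using the URL syntax above; host, port, password and
-mountpoint are placeholders:
-
-
ffmpeg -re -i input.mp3 -c:a libmp3lame -ice_name "Test stream" -f mp3 icecast://source:hackme@localhost:8000/stream
-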
-
-
25.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
25.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server [:port ][/app ][/playpath ]
-
-
-
-
25.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
25.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
-
pipe:[number]
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example to read from stdin with ffmpeg:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
25.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (1935 by default).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOptions):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
Specify that the media is a live stream. No resuming or seeking in
live streams is possible. The default value is any, which means the
subscriber first tries to play the live stream specified in the
playpath. If a live stream of that name is not found, it plays the
recorded stream. The other possible values are live and recorded.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
It is only sent if the option is specified or if rtmp_live is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example to read with ffplay a multimedia resource named
"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
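As a further sketch (the server and stream names are placeholders, assuming an RTMP server that publishes a live stream "mystream" under the "live" application), the rtmp_live option described above can be passed on the command line to request live playback:

ffplay -rtmp_live live rtmp://myserver/live/mystream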
-
-
-
25.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
25.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
25.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
25.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
25.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
25.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
Set timeout in milliseconds of socket I/O operations used by the underlying
low-level operation. By default it is set to -1, which means that the timeout
is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
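A minimal usage sketch following the syntax above (the domain, user, password, server, share and file names are placeholders, not taken from this document), to play a file from an SMB share with ffplay:

ffplay smb://mydomain:myuser:mypassword@myserver/myshare/path/to/video.mkv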
-
For more information see: http://www.samba.org/ .
-
-
-
25.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
Allows reading from or writing to remote resources using the SFTP protocol.
-
-
The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
Specify the path of the file containing the private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on a remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
25.24 librtmp (rtmp, rtmpe, rtmps, rtmpt, rtmpte)# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"–enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto://server[:port][/app][/playpath] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
server, port, app and playpath have the same
meaning as specified for the RTMP native protocol.
options contains a list of space-separated options of the form
key=val.
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using ffmpeg:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
25.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
rtp://hostname[:port][?option=val...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
localrtcpport=n
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
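As an illustrative sketch (the multicast address, port and input file are placeholders), URL options such as ttl and pkt_size from the list above can be appended to the RTP URL when sending a single stream with the rtp muxer; -an drops the audio since only one stream can be carried per RTP session:

ffmpeg -re -i input.mp4 -an -f rtp "rtp://239.255.0.1:5004?ttl=2&pkt_size=1200"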
-
-
25.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat, it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for an RTSP URL is:
-
-
rtsp://hostname[:port]/path
-
-
-
Options can be set on the ffmpeg/ffplay command
line, or set in code via AVOptions or in avformat_open_input.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay, the
streams to display can be chosen with -vst n and -ast n for video
and audio respectively, and can be switched on the fly by pressing v and a.
-
-
-
25.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay and ffmpeg tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
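- Receive only the video from a stream, forcing TCP transport (a sketch combining the rtsp_transport and allowed_media_types options described above; the server address and path are placeholders):

ffplay -rtsp_transport tcp -allowed_media_types video rtsp://server/video.mp4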
-
-
-
25.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat, it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
25.27.1 Muxer# TOC
-
-
The syntax for a SAP URL given to the muxer is:
-
-
sap://destination[:port][?options]
-
-
-
The RTP packets are sent to destination on port port,
or to port 5004 if no port is specified.
options is a &-separated list. The following options
are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
25.27.2 Demuxer# TOC
-
-
The syntax for a SAP URL given to the demuxer is:
-
-
sap://[address][:port]
-
-
-
address is the multicast address to listen for announcements on;
if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
is the port that is listened on, 9875 if omitted.
-
-
The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:

ffplay sap://

To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
25.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host:port[?options]
-
-
-
The protocol accepts the following options:
-
-listen
If set to any value, listen for an incoming connection. An outgoing connection is made by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
-
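A usage sketch, assuming an FFmpeg build on a system with SCTP support (host, port and file names are placeholders): one process listens for an SCTP connection and records the incoming stream, while another sends it an MPEG-TS stream using the listen URL option documented above.

ffmpeg -i sctp://localhost:5555?listen -c copy received.ts
ffmpeg -re -i input.ts -c copy -f mpegts sctp://localhost:5555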
-
25.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
-
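A hedged sketch of publishing an SRTP stream (the destination, port and the base64 key/salt string are placeholders, and the rtp_mpegts muxer is used here only for illustration): the srtp_out_suite and srtp_out_params options select the encoding suite and the master key/salt described above.

ffmpeg -re -i input.ts -c copy -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params <base64-key-and-salt> srtp://destination:port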
-
-
25.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
25.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP URL is:
-
-
tcp://hostname:port[?options]
-
-
-
options contains a list of &-separated options of the form
key=val.
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to set up a listening TCP connection
with ffmpeg, which is then accessed with ffplay:


ffmpeg -i input -f format tcp://hostname:port?listen
ffplay tcp://hostname:port
-
-
-
-
25.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL URL is:
-
-
tls://hostname:port[?options]
-
-
-
The following parameters can be set via command line options
(or in code via AVOptions):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay:



ffplay tls://hostname:port
-
-
-
-
25.33 udp# TOC
-
-
User Datagram Protocol.
-
-
The required syntax for a UDP URL is:
-
-
udp://hostname:port[?options]
-
-
-
options contains a list of &-separated options of the form key=val.
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
25.33.1 Examples# TOC
-
-
- Use ffmpeg to stream over UDP to a remote endpoint:


ffmpeg -i input -f format udp://hostname:port


- Use ffmpeg to stream in mpegts format over UDP using 188-byte UDP packets, using a large input buffer:


ffmpeg -i input -f mpegts udp://hostname:port?pkt_size=188&buffer_size=65535


- Use ffmpeg to receive over UDP from a remote endpoint:


ffmpeg -i udp://[multicast-address]:port ...
-
-
-
-
-
25.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:

unix://filepath

The following parameters can be set via command line options
(or in code via AVOptions):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
-
-
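A minimal sketch (the socket path and file names are placeholders; nc is used here only as an example peer): stream MPEG-TS data into a Unix socket that another process is already listening on.

nc -lU /tmp/unix.sock > received.ts &
ffmpeg -re -i input.ts -c copy -f mpegts unix:///tmp/unix.sock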
-
26 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
FFmpeg tools, or by setting the value explicitly in the device
AVFormatContext options or using the libavutil/opt.h API
for programmatic use.
-
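For instance, a private device option is passed on the command line like any other option (a sketch assuming an ALSA input device is available as hw:0; sample_rate and channels are assumed to be private options of that input device):

ffmpeg -f alsa -sample_rate 44100 -channels 2 -i hw:0 out.wav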
-
-
-
27 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow you to access
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
are enabled by default. You can list all available ones using the
configure option "--list-indevs".


You can disable all the input devices using the configure option
"--disable-indevs", and selectively enable an input device using the
option "--enable-indev=INDEV", or you can disable a particular
input device using the option "--disable-indev=INDEV".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
27.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD[,DEV[,SUBDEV]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg from an ALSA device with
card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
27.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the framework currently recommended by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the latter selects the audio input.
The stream has to be specified by the device name or the device index as shown by the device list.
Alternatively, the video and/or audio input device can be chosen by index using
-video_device_index <INDEX> and/or -audio_device_index <INDEX>,
overriding any device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
27.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
27.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
27.3 bktr# TOC
-
-
BSD video input device.
-
-
-
27.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronism between them.
-
-
The input name should be in the format:

TYPE=NAME[:TYPE=NAME]

where TYPE can be either audio or video,
and NAME is the device's name.
-
-
-
27.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
27.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
-
-
-
-
27.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
27.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with ffmpeg:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
27.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:

desktop

or

title=window_title
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
27.7.1 Options# TOC
-
-
-draw_mouse
Specify whether to draw the mouse pointer. Use the value 0 to
not draw the pointer. Default value is 1.
-
-
-framerate
Set the grabbing frame rate. Default value is ntsc,
corresponding to a frame rate of 30000/1001.
-
-
-show_region
-Show grabbed region on screen.
-
If show_region is specified with 1, then the grabbing
region will be indicated on screen. With this option, it is easy to
know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
27.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
--enable-libiec61883 to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
27.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
27.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
-
-
-
-
-
-
27.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
and jack_disconnect programs, or do it through a graphical interface,
for example with qjackctl.


To list the JACK clients and their properties you can invoke the command
jack_lsp.
-
-
The following example shows how to capture a JACK readable client with ffmpeg.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
27.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
27.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
27.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
back with ffplay:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with ffplay:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
27.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
installed on your system. Requires the configure option --enable-libcdio.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg the entire Audio-CD in /dev/sr0,
you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
27.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394.
-
-
-
27.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
FFmpeg with --enable-openal.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
--extra-cflags and --extra-ldflags for allowing the build
system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
27.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
27.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string '' as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
within the same ffmpeg command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
27.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg use the command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
27.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this input device you need to configure FFmpeg with --enable-libpulse.
-
-
The filename to provide to the input device is a source device or the
-string "default"
-
-
To list the PulseAudio source devices and their properties you can invoke
the command pactl list sources.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
27.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
by default it is the LIBAVFORMAT_IDENT string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
Specify the sample rate in Hz; by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
27.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
-
-
-
27.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
27.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg use the command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
27.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
--enable-libv4l2 configure option), it is possible to use it with the
-use_libv4l2 input device option.
-
-
The name of the device to grab is a file device node. Linux
systems tend to automatically create such nodes when the device
(e.g. a USB webcam) is plugged into the system, and they have a name of the
kind /dev/videoN, where N is a number associated with
the device.
-
-
Video4Linux2 devices usually support a limited set of
width x height sizes and frame rates. You can check which are
supported using -list_formats all for Video4Linux2 devices.
Some devices, like TV cards, support one or more standards. It is possible
to list all the supported standards using -list_standards all.
-
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg and ffplay:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
27.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Default to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
27.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
-
27.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
--enable-x11grab.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname:display_number.screen_number specifies the
X11 display name of the screen to grab from. hostname can be
omitted, and defaults to "localhost". The environment variable
DISPLAY contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo program for getting basic information about the
properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
27.20.1 Options# TOC
-
-
-draw_mouse
Specify whether to draw the mouse pointer. A value of 0 specifies
not to draw the pointer. Default value is 1.
-
-
-follow_mouse
Make the grabbed area follow the mouse. The argument can be
centered or a number of pixels PIXELS.

When it is specified with "centered", the grabbing region follows the mouse
pointer and keeps the pointer at the center of the region; otherwise, the region
follows only when the mouse pointer comes within PIXELS (greater than
zero) of the edge of the region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer reaches within 100 pixels to edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
Set the grabbing frame rate. Default value is ntsc,
corresponding to a frame rate of 30000/1001.
-
-
-show_region
-Show grabbed region on screen.
-
If show_region is specified with 1, then the grabbing
region will be indicated on screen. With this option, it is easy to
know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
Set the video frame size. Default value is vga.
-
-
-use_shm
Use the MIT-SHM extension for shared memory. Default value is 1.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
27.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
need to configure with the appropriate --extra-cflags and --extra-ldflags.
On Windows, you need to run the IDL files through widl.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
uyvy422, and the framerate and video size must be determined for your device with
-list_formats 1. Audio sample rate is always 48 kHz and the number
of channels is currently limited to 2 (stereo).
-
-
-
27.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
27.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
28 Output Devices# TOC
-
-
Output devices are configured elements in FFmpeg that can write
-multimedia data to an output device attached to your system.
-
-
When you configure your FFmpeg build, all the supported output devices
are enabled by default. You can list all available ones using the
configure option "--list-outdevs".


You can disable all the output devices using the configure option
"--disable-outdevs", and selectively enable an output device using the
option "--enable-outdev=OUTDEV", or you can disable a particular
output device using the option "--disable-outdev=OUTDEV".
-
-
The option "-devices" of the ff* tools will display the list of
-enabled output devices.
-
-
A description of the currently available output devices follows.
-
-
-
28.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) output device.
-
-
-
28.1.1 Examples# TOC
-
-
- Play a file on default ALSA device:
-
-
ffmpeg -i INPUT -f alsa default
-
-
- Play a file on soundcard 1, audio device 7:
-
-
ffmpeg -i INPUT -f alsa hw:1,7
-
-
-
-
-
28.2 caca# TOC
-
-
CACA output device.
-
-
This output device allows one to show a video stream in a CACA window.
-Only one CACA window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need to configure FFmpeg with
--enable-libcaca.
-libcaca is a graphics library that outputs text instead of pixels.
-
-
For more information about libcaca, check:
-http://caca.zoy.org/wiki/libcaca
-
-
-
28.2.1 Options# TOC
-
-
-window_title
Set the CACA window title; if not specified it defaults to the filename
specified for the output device.
-
-
-window_size
-Set the CACA window size, can be a string of the form
-width xheight or a video size abbreviation.
-If not specified it defaults to the size of the input video.
-
-
-driver
-Set display driver.
-
-
-algorithm
-Set dithering algorithm. Dithering is necessary
-because the picture being rendered has usually far more colours than
-the available palette.
-The accepted values are listed with -list_dither algorithms
.
-
-
-antialias
-Set antialias method. Antialiasing smoothens the rendered
-image and avoids the commonly seen staircase effect.
-The accepted values are listed with -list_dither antialiases
.
-
-
-charset
-Set which characters are going to be used when rendering text.
-The accepted values are listed with -list_dither charsets
.
-
-
-color
-Set color to be used when rendering text.
-The accepted values are listed with -list_dither colors
.
-
-
-list_drivers
-If set to true , print a list of available drivers and exit.
-
-
-list_dither
-List available dither options related to the argument.
The argument must be one of algorithms, antialiases, charsets, colors.
-
-
-
-
-
28.2.2 Examples# TOC
-
-
- The following command shows the ffmpeg output in a
CACA window, forcing its size to 80x25:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
-
-
- Show the list of available drivers and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
-
-
- Show the list of available dither colors and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
-
-
-
-
-
28.3 decklink# TOC
-
-
The decklink output device provides playback capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this output device, you need the Blackmagic DeckLink SDK and you
need to configure with the appropriate --extra-cflags and --extra-ldflags.
On Windows, you need to run the IDL files through widl.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
uyvy422, and the framerate and video size must be determined for your device with
-list_formats 1. Audio sample rate is always 48 kHz.
-
-
-
28.3.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-preroll
-Amount of time to preroll video in seconds.
-Defaults to 0.5 .
-
-
-
-
-
-
28.3.2 Examples# TOC
-
-
- List output devices:
-
-
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
-
-
- List supported formats:
-
-
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
-
-
- Play video clip:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
-
-
- Play video clip with non-standard framerate or video size:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
-
-
-
-
-
-
28.4 fbdev# TOC
-
-
Linux framebuffer output device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
-
28.4.1 Options# TOC
-
-xoffset
-yoffset
-Set x/y coordinate of top left corner. Default is 0.
-
-
-
-
-
28.4.2 Examples# TOC
-
Play a file on framebuffer device /dev/fb0 .
-Required pixel format depends on current framebuffer settings.
-
-
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
28.5 opengl# TOC
-
OpenGL output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-opengl.
-
-
This output device allows one to render to an OpenGL context.
The context may be provided by the application, or a default SDL window is created.


When the device renders to an external context, the application must implement handlers for the following messages:
AV_DEV_TO_APP_CREATE_WINDOW_BUFFER - create an OpenGL context on the current thread.
AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER - make the OpenGL context current.
AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER - swap buffers.
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER - destroy the OpenGL context.
The application is also required to inform the device about the current resolution by sending an AV_APP_TO_DEV_WINDOW_SIZE message.
-
-
-
28.5.1 Options# TOC
-
-background
Set the background color. Black is the default.
-
-no_window
Disables the default SDL window when set to a non-zero value.
The application must provide an OpenGL context and both window_size_cb
and window_swap_buffers_cb callbacks when set.
-
-window_title
-Set the SDL window title, if not specified default to the filename specified for the output device.
-Ignored when no_window is set.
-
-window_size
-Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
-If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
-Mostly usable when no_window is not set.
-
-
-
-
-
-
28.5.2 Examples# TOC
-
Play a file on SDL window using OpenGL rendering:
-
-
ffmpeg -i INPUT -f opengl "window title"
-
-
-
-
28.6 oss# TOC
-
-
OSS (Open Sound System) output device.
-
-
-
28.7 pulse# TOC
-
-
PulseAudio output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org
-
-
-
28.7.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is set to the specified output name.
-
-
-device
-Specify the device to use. Default device is used when not provided.
-List of output devices can be obtained with command pactl list sinks
.
-
-
-buffer_size
-buffer_duration
-Control the size and duration of the PulseAudio buffer. A small buffer
-gives more control, but requires more frequent updates.
-
-buffer_size specifies size in bytes while
-buffer_duration specifies duration in milliseconds.
-
-When both options are provided then the highest value is used
-(duration is recalculated to bytes using stream parameters). If they
-are set to 0 (which is the default), the device will use the default
-PulseAudio duration value. By default PulseAudio sets the buffer duration
-to around 2 seconds.
-
-
-prebuf
-Specify pre-buffering size in bytes. The server does not start with
-playback before at least prebuf bytes are available in the
-buffer. By default this option is initialized to the same value as
-buffer_size or buffer_duration (whichever is bigger).
-
-
-minreq
-Specify minimum request size in bytes. The server does not request less
-than minreq bytes from the client, but instead waits until the buffer
-is free enough to request more bytes at once. It is recommended not to set
-this option, in which case the server will initialize it to a value that it
-deems sensible.
-
-
-
-
-
-
28.7.2 Examples# TOC
-
Play a file on default device on default server:
-
-
ffmpeg -i INPUT -f pulse "stream name"
-
-
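As a further sketch (the value is only illustrative), play the same file while
-requesting a roughly 60 millisecond PulseAudio buffer via the
-buffer_duration option described above:
-
-
ffmpeg -i INPUT -f pulse -buffer_duration 60 "stream name"
-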
-
-
28.8 sdl# TOC
-
-
SDL (Simple DirectMedia Layer) output device.
-
-
This output device allows one to show a video stream in an SDL
-window. Only one SDL window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need libsdl installed on your system
-when configuring your build.
-
-
For more information about SDL, check:
-http://www.libsdl.org/
-
-
-
28.8.1 Options# TOC
-
-
-window_title
-Set the SDL window title, if not specified default to the filename
-specified for the output device.
-
-
-icon_title
-Set the name of the iconified SDL window, if not specified it is set
-to the same value of window_title .
-
-
-window_size
-Set the SDL window size, can be a string of the form
-widthxheight or a video size abbreviation.
-If not specified it defaults to the size of the input video,
-downscaled according to the aspect ratio.
-
-
-window_fullscreen
-Set fullscreen mode when non-zero value is provided.
-Default value is zero.
-
-
-
-
-
28.8.2 Interactive commands# TOC
-
-
The window created by the device can be controlled through the
-following interactive commands.
-
-
-q, ESC
-Quit the device immediately.
-
-
-
-
-
28.8.3 Examples# TOC
-
-
The following command shows the ffmpeg
output in an
-SDL window, forcing its size to the qcif format:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
-
-
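As an additional illustrative sketch, the window_fullscreen option can be
-combined with the same command to display the video fullscreen:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_fullscreen 1 -f sdl "SDL output"
-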
-
-
28.9 sndio# TOC
-
-
sndio audio output device.
-
-
-
28.10 xv# TOC
-
-
XV (XVideo) output device.
-
-
This output device allows one to show a video stream in a X Window System
-window.
-
-
-
28.10.1 Options# TOC
-
-
-display_name
-Specify the hardware display name, which determines the display and
-communications domain to be used.
-
-The display name or DISPLAY environment variable can be a string in
-the format hostname [:number [.screen_number ]].
-
-hostname specifies the name of the host machine on which the
-display is physically attached. number specifies the number of
-the display server on that host machine. screen_number specifies
-the screen to be used on that server.
-
-If unspecified, it defaults to the value of the DISPLAY environment
-variable.
-
-For example, dual-headed:0.1
would specify screen 1 of display
-0 on the machine named “dual-headed”.
-
-Check the X11 specification for more detailed information about the
-display name format.
-
-
-window_id
-When set to non-zero value then device doesn’t create new window,
-but uses existing one with provided window_id . By default
-this options is set to zero and device creates its own window.
-
-
-window_size
-Set the created window size, can be a string of the form
-widthxheight or a video size abbreviation. If not
-specified it defaults to the size of the input video.
-Ignored when window_id is set.
-
-
-window_x
-window_y
-Set the X and Y window offsets for the created window. They are both
-set to 0 by default. The values may be ignored by the window manager.
-Ignored when window_id is set.
-
-
-window_title
-Set the window title, if not specified default to the filename
-specified for the output device. Ignored when window_id is set.
-
-
-
-
For more information about XVideo see http://www.x.org/ .
-
-
-
28.10.2 Examples# TOC
-
-
- Decode, display and encode video input with ffmpeg
at the
-same time:
-
-
ffmpeg -i INPUT OUTPUT -f xv display
-
-
- Decode and display the input video to multiple X11 windows:
-
-
ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
-
-
-
-
-
29 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
-
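For example, the following sketch (option values are illustrative) applies the
-options above through the aresample filter, converting to a 48 kHz output with
-triangular_hp dithering:
-
-
ffmpeg -i INPUT -af aresample=out_sample_rate=48000:dither_method=triangular_hp OUTPUT
-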
-
-
30 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific of
-some scaling algorithms and ignored by others. The specified values
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
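For example, the following sketch (sizes and flags are illustrative) selects the
-lanczos algorithm with accurate rounding for a scale operation:
-
-
ffmpeg -i INPUT -vf scale=1280:720 -sws_flags lanczos+accurate_rnd OUTPUT
-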
-
-
-
-
31 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half part of the video, and then vertically flipped. The
-overlay filter takes in input the first unchanged output of the
-split filter (which was labelled as [main] ), and overlay on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take in input a list of parameters: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
32 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
-
graph2dot -h
-
to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
33 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
33.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance of, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments ".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value . In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade
filter
-declares three options in this order – type , start_frame and
-nb_frames . Then the parameter list in:0:30 means that the value
-in is assigned to the option type , 0 to
-start_frame and 30 to nb_frames .
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct value must precede the key=value pairs, and
-follow the same constraints order of the previous point. The following
-key=value pairs can be set in any preferred order.
-
-
-
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
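For example, the following sketch (the filter choice is illustrative) requests
-the lanczos algorithm for any scaler that libavfilter inserts automatically for
-the pixel format conversion:
-
-
ffmpeg -i INPUT -vf "sws_flags=lanczos;format=yuv420p" OUTPUT
-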
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
33.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to
-separate values, or one of the escaping characters \'
.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \'
or the special
-characters [],;
used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the '
special escaping character, and the
-:
special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \'
escaping special characters,
-also ,
needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\
is special and needs to be escaped with another \
, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
34 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
35 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
35.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channel are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If the number of given delays is
-smaller than the number of channels, all remaining channels will not be delayed.
-
-
-
-
-
35.1.1 Examples# TOC
-
-
- Delay the first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
-
adelay=1500|0|500
-
-
35.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
35.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
-
aecho=0.8:0.88:60:0.4
-
-
- If the delay is very short, then it sounds like a (metallic) robot playing music:
-
-
aecho=0.8:0.88:6:0.4
-
-
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
35.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
35.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
-
aeval=val(0)|-val(1)
-
-
35.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in
for fade-in, or
-out
for a fade-out effect. Default is in
.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
35.4.1 Examples# TOC
-
-
- Fade in the first 15 seconds of audio:
-
-
afade=t=in:ss=0:d=15
-
-
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
35.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
35.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
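For example, a sketch with illustrative values that applies an all-pass filter
-centred at 700 Hz with a Q of 0.707:
-
-
ffmpeg -i INPUT -af allpass=f=700:width_type=q:width=0.707 OUTPUT
-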
-
-
35.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
On the other hand, if both input are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate, and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
35.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
35.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
35.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
35.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
This can be used together with ffmpeg
-shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
35.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
-
apad=pad_len=1024
-
-
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
-
apad=whole_len=10000
-
-
- Use ffmpeg
to pad the audio input with silence, so that the
-video stream will always be the shortest and will be converted
-in full to the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
35.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
A phaser filter creates series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
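For example, a sketch with illustrative values that uses triangular modulation
-at 1 Hz:
-
-
ffmpeg -i INPUT -af aphaser=type=t:speed=1 OUTPUT
-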
-
-
35.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
35.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
-
aresample=44100
-
-
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
-
aresample=async=1000
-
-
-
-
35.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel .
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
35.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
-
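For example, a sketch that halves the rate of 44100 Hz input (the input rate is
-an assumption), playing it at half speed and an octave lower:
-
-
ffmpeg -i INPUT -af asetrate=22050 OUTPUT
-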
-
-
35.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information in
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
-
35.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05
(50 milliseconds). Allowed range is [0.1 - 10]
.
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
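For example, a minimal sketch that prints the statistics to the log without
-writing any output file:
-
-
ffmpeg -i INPUT -af astats -f null -
-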
-
-
35.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2
, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
35.17.1 Examples# TOC
-
-
Stress-test amerge
by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
35.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
35.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
35.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
-
atempo=0.8
-
-
- To speed up audio to 125% tempo:
-
-
atempo=1.25
-
-
-
35.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
35.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter roll off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000
.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
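For example, a sketch with illustrative values that keeps a band centred at
-1500 Hz with a 600 Hz width:
-
-
ffmpeg -i INPUT -af bandpass=f=1500:width_type=h:width=600 OUTPUT
-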
-
-
-
35.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter roll off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000
.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
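For example, a sketch with illustrative values that rejects a band around
-50 Hz, as is sometimes done to reduce mains hum:
-
-
ffmpeg -i INPUT -af bandreject=f=50:width_type=q:width=2 OUTPUT
-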
-
-
-
35.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100
Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
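For example, a sketch with illustrative values that boosts the frequencies
-below about 110 Hz by 6 dB:
-
-
ffmpeg -i INPUT -af bass=g=6:f=110 OUTPUT
-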
-
-
-
35.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
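For example, a sketch with purely illustrative (stable, roughly low-pass)
-coefficients:
-
-
ffmpeg -i INPUT -af biquad=b0=0.4:b1=0.8:b2=0.4:a0=1:a1=0.2:a2=0.1 OUTPUT
-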
-
-
35.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
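For example, a minimal sketch that applies the Chu Moy crossfeed profile:
-
-
ffmpeg -i INPUT -af bs2b=profile=cmoy OUTPUT
-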
-
-
-
35.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel -out_channel
or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
35.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
35.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|....
or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0
is assumed but
-may be overridden (by 0/out-dBn
). Typical values for the transfer
-function are -70/-70|-60/-20
.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
35.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
35.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
35.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
35.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
35.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added sweep delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
-
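-For example, a basic invocation with a slightly deeper and faster sweep than the
-defaults (values chosen here purely for illustration) could be:
-
flanger=delay=10:depth=5:speed=2
-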
-
-
35.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
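-For example, to attenuate low-frequency rumble below roughly 200 Hz with the
-default double-pole filter (the frequency is chosen here for illustration):
-
highpass=f=200
-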
-
-
35.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
35.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with --enable-ladspa.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/, /usr/local/lib/ladspa/,
-/usr/lib/ladspa/.
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0|c1=value1|c2=value2|..., where
-valuei is the value set on the i-th control.
-If controls is set to help, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate, default is 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
35.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
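ladspa=file=amp
-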
-
- List all available controls and their valid ranges for vcf_notch
-plugin from the VCF library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using the Computer Music Toolkit (CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using the plugin C* Click - Metronome from the
-C* Audio Plugin Suite (CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply the C* Eq10X2 - Stereo 10-band equaliser effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
35.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and prior one is kept.
-
-
-
-
-
35.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
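-For example, to keep only content below roughly 3 kHz with the default
-double-pole filter (the frequency is chosen here for illustration):
-
lowpass=f=3000
-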
-
-
-
35.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
35.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
Note that ffmpeg integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
35.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- there is only one input per output channel,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
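pan="stereo|c0=0*c0|c1=c1"
-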
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
35.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At the end of filtering it displays track_gain and track_peak.
-
-
-
35.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
35.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-than or equal to a noise tolerance value for a duration greater than or equal to the
-minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
35.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg to detect silence with 0.0001 noise
-tolerance in silence.mp3:
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
35.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from the beginning of audio
-the start_periods will be 1 but it can be increased to higher
-values to trim all audio up to a specific count of non-silence periods.
-Default value is 0.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0 may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-This is useful, for example, if you want to shorten long pauses between words
-but do not want to remove the pauses completely. Default value is 0.
-
-
-
-
-
-
35.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
-
-
35.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency, which can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
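-For example, to boost the high end by 5 dB above roughly 4 kHz (values chosen
-here for illustration):
-
treble=g=5:f=4000
-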
-
-
35.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available; all other
-variables will evaluate to NAN.
-
-
-
35.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
35.42.2 Examples# TOC
-
-
-
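- Halve the input audio volume:
-
-
volume=volume=0.5
volume=volume=1/2
volume=volume=-6.0206dB
-
-
- Increase input audio power by 6 decibels using fixed-point precision:
-
-
volume=volume=6dB:precision=fixed
-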
-
-
35.43 volumedetect# TOC
-
-
Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the input stream end is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
35.43.1 Examples# TOC
-
-
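-A typical way to run the filter over a whole file while discarding the media
-output (the input name is illustrative) is:
-
ffmpeg -i input.wav -af volumedetect -f null /dev/null
-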
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
36 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
36.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
36.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
36.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
36.2.1 Examples# TOC
-
-
- Generate silence:
-
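aevalsrc=0
-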
-
- Generate a sin signal with frequency of 440 Hz, set sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
-
-
-
-
36.3 anullsrc# TOC
-
-
The null audio source; it returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
36.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
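anullsrc=r=48000:cl=4
-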
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
-
-
36.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite
.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal
. See also the list_voices option.
-
-
-
-
-
36.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt
voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite
and
-the lavfi
device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
36.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
36.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
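sine
-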
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
37 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
37.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of the filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
37.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
38 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
38.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
38.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
38.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto.
-
-
-
-
-
38.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16.
-
-
-
-
-
38.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
38.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32.
-
-
-
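-For example, to scan a file and log the frames that are almost completely black
-while discarding the media output (the input name is illustrative):
-
ffmpeg -i input.mp4 -vf blackframe -f null -
-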
-
-
-
38.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
The blend
filter takes two input streams and outputs one
-stream, the first input is the "top" layer and second input is
-"bottom" layer. Output terminates when shortest input terminates.
-
-
The tblend
(time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode . Default value is normal
.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
38.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
38.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2
for the
-luma and alpha planes, and of min(cw,ch)/2
for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
38.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
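boxblur=2:1:cr=0:ar=0
-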
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
38.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
38.9.1 Examples# TOC
-
-
- Visualize multi-directional MVs from P and B-frames using ffplay:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
38.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
38.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
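colorbalance=rs=.3
-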
-
-
-
-
38.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allows manual selection of a constrained output level range.
-
-
-
-
-
38.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
38.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
red =red *rr + blue *rb + green *rg + alpha *ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
38.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
38.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
38.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
38.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1 will force the output display aspect ratio
-to be the same of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
38.15.1 Examples# TOC
-
-
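- Crop an area with size 100x100 at position (12,34):
-
-
crop=100:100:12:34
-
-
- Crop the central input area with size 2/3 of the input video:
-
-
crop=2/3*in_w:2/3*in_h
-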
-
-
-
38.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
-
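-For example, to let cropdetect analyze a file and print its crop suggestions
-while discarding the media output (the input name is illustrative):
-
ffmpeg -i input.mp4 -vf cropdetect -f null -
-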
-
-
38.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-the vector spaces, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
To avoid some filtergraph syntax conflicts, each key points list needs to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ....
-
-
-
38.17.1 Examples# TOC
-
-
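- Increase slightly the middle level of blue:
-
-
curves=blue='0/0 0.5/0.58 1/1'
-
-
- Apply the vintage preset:
-
-
curves=preset=vintage
-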
-
-
-
38.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing of the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on the processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
38.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5:
-
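dctdnoiz=4.5
-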
-
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
Violent denoise using a block size of 16x16:
-
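dctdnoiz=15:n=4
-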
-
-
-
38.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
-
-
-
38.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content then the output of pullup,dejudder
-will have a variable frame rate. It may change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
-
-
-
38.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
38.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
38.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area in which to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
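-For example, to stabilize hand-held footage while allowing up to 32 pixels of
-movement in each direction (values chosen here for illustration):
-
deshake=rx=32:ry=32
-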
-
-
38.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
38.23.1 Examples# TOC
-
-
-
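- Draw a black box around the edge of the input image:
-
-
drawbox
-
-
- Draw a box with color red and an opacity of 50%:
-
-
drawbox=10:20:200:60:red@0.5
-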
-
-
38.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness
, so image gets
-framed. Default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
38.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
38.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
38.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none,
-strftime (deprecated) or
-normal (default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer
-each other, so you can for example specify y=x/dar
.
-
-
-
-
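-
-For instance, a minimal illustrative sketch (the font file path is a placeholder)
-that uses these constants to center the text in the frame:
-
-
drawtext=fontfile=/path/to/font.ttf:text='Hello':fontcolor=white:x=(w-text_w)/2:y=(h-text_h)/2
-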
-
38.25.2 Text expansion# TOC
-
-
If expansion is set to strftime ,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none , the text is printed verbatim.
-
-
If expansion is set to normal (which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequences of the form %{...} are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
38.25.3 Examples# TOC
-
-
-
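-
-As an illustrative sketch of the expansion mechanism described above (the file
-names are placeholders, and a fontconfig-enabled build is assumed so no font
-file needs to be given), draw the frame number and the timestamp of each frame:
-
-
ffmpeg -i input.mp4 -vf "drawtext=fontsize=24:fontcolor=white:x=10:y=10:text='frame %{n} pts %{pts}'" output.mp4
-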
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
38.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high .
-
-Default value for low is 20/255 , and default value for high
-is 50/255 .
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
38.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
38.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select r , g , b planes
-with y , u , v planes at the same time.
-
-
-
-
-
38.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
38.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
-
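-
-For example, an illustrative sketch (the values are arbitrary) that posterizes
-the input down to 16 distinct output colors using 3 iterations:
-
-
elbg=l=16:n=3
-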
-
-
38.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in .
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
38.29.1 Examples# TOC
-
-
-
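-
-For instance, an illustrative sketch (the frame numbers are arbitrary) that
-fades in over the first 30 frames and fades out over the last 30 frames of a
-200-frame clip:
-
-
fade=in:0:30,fade=out:170:30
-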
-
-
38.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0 or top )
-or the bottom field (if the value is 1 or bottom ).
-
-
-
-
-
38.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, and is
-what fieldmatch is based on. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parenthesis at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0 (disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0 could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 default to 0.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0] range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0] .
-
-Default value is 12.0 .
-
-
-combmatch
-When combmatch is not none , fieldmatch will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1 (every pixel will be detected as combed) to 255 (no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12] .
-
-Default value is 9.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma only combing in the source.
-
-Default value is 0.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0 and maximum is blocky x blockx (at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80.
-
-
-
-
-
38.31.1 p/c/n/u/b meaning# TOC
-
-
-
38.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch is configured to run a matching from bottom
-(field =bottom ), this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
38.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, a
-’x’ is placed above and below each matched field.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
38.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
38.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
38.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
38.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
38.34.1 Examples# TOC
-
-
-
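-
-For example, a minimal sketch that restricts the pixel formats offered to the
-next filter to two YUV formats:
-
-
format=pix_fmts=yuv420p|yuv444p
-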
-
-
38.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
38.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
-
fps=fps=25
-
-
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
38.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
38.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select frame after every step frames.
-Allowed values are positive integers higher than 0. Default value is 1.
-
-
-
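-
-For example, to keep only one frame out of every five:
-
-
framestep=step=5
-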
-
-
38.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r .
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH .
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive, or as a color description as specified
-in the "Color" section in the ffmpeg-utils manual), a position (specified as
-X /Y , where X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
38.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
38.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0 − PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1 . Using this
-option may cause flicker since the B-Frames often have larger QP. Default is
-0 (not enabled).
-
-
-
-
-
-
38.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified it will evaluate to the opaque value.
-If none of the chrominance expressions are specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and
-0.5,0.5 for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
38.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
-
- Generate a bidimensional sine wave, with angle PI/3 and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
38.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
38.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and radius of 8 :
-
-
gradfun=3.5:8
-
-
- Specify radius, omitting the strength (which will fall back to the default
-value):
-
-
gradfun=radius=8
-
-
-
-
-
38.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0 disables the filter after the last frame of the CLUT is reached.
-Default is 1.
-
-
-
-
haldclut also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
38.42.1 Workflow examples# TOC
-
-
-
38.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (duration of
-clut.nut ), then the last picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot stream.
-
-
-
38.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a squared image of Level*Level*Level by
-Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc=8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
38.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg :
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
38.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none , weak or
-strong . It defaults to none .
-
-
-
-
-
38.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar to color but actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels .
-
-
-level_height
-Set height of level in levels . Default value is 200.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels . Default value is 12.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform . Can be either row or column .
-Default is row .
-
-
-waveform_mirror
-Set mirroring mode for waveform . 0 means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row mode and at the top for column mode. Default is
-0 (unmirrored).
-
-
-display_mode
-Set display mode for waveform and levels .
-It accepts the following values:
-
-‘parade ’
-Display separate graphs for the color components side by side in
-row waveform mode or one below the other in column waveform mode
-for waveform histogram mode. For levels histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade , except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade .
-
-
-levels_mode
-Set mode for levels . Can be either linear or logarithmic .
-Default is linear .
-
-
-
-
-
38.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
38.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
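-
-As an illustrative sketch (the strengths are arbitrary), apply a slightly
-stronger-than-default spatial denoise and let the temporal strengths take
-their derived defaults:
-
-
hqdn3d=luma_spatial=6:chroma_spatial=4.5
-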
-
-
38.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for hq2x , 3 for
-hq3x and 4 for hq4x .
-Default is 3 .
-
-
-
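-
-For example, to magnify a pixel-art source by a factor of 4 with hq4x:
-
-
hqx=n=4
-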
-
-
38.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
38.48.1 Examples# TOC
-
-
-
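-
-An illustrative sketch (the values are arbitrary): rotate the hue by 90 degrees
-and halve the saturation:
-
-
hue=h=90:s=0.5
-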
-
-
38.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
38.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect if the input frames are interlaced, progressive,
-top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-if the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
-
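-
-A minimal sketch (input.mp4 is a placeholder) that runs the detector over a
-file and discards the decoded output, so that only the statistics logged by
-idet are of interest:
-
-
ffmpeg -i input.mp4 -vf idet -an -f null -
-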
-
-
38.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced images fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none .
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0.
-
-
-
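-
-For instance, a sketch that deinterleaves the luma, chroma and alpha fields
-(using the short d value) so the two half pictures can be processed
-independently:
-
-
il=l=d:c=d:a=d
-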
-
-
38.51 interlace# TOC
-
-
Simple interlacing filter from progressive contents. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
-
-
38.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
38.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
-
kerndeint=sharp=1
-
-
- Paint processed pixels in white:
-
-
kerndeint=map=1
-
-
-
-
38.53 lenscorrection# TOC
-
-
Correct radial lens distortion
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
38.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
-
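-
-An illustrative sketch (the file names are placeholders and the coefficients
-are example values that would normally come from a calibration tool such as
-the opencv sample mentioned above):
-
-
ffmpeg -i input.mp4 -vf "lenscorrection=cx=0.5:cy=0.5:k1=-0.227:k2=-0.022" output.mp4
-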
-
38.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
-
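-
-A minimal sketch applying a LUT from a file (the file name is a placeholder)
-with trilinear interpolation:
-
-
lut3d=file=mylut.cube:interp=trilinear
-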
-
-
38.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
38.55.1 Examples# TOC
-
-
-
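-
-For instance, an illustrative sketch that applies a gamma correction of 0.7 to
-the luma plane of a YUV input, leaving chroma untouched:
-
-
lutyuv=y=gammaval(0.7)
-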
-
-
38.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merges selected input
-planes into the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mapping is
-similar, ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p .
-
-
-
-
-
38.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
38.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
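-
-For example, an illustrative chain (a sketch, not the only valid one) that
-first produces one field per frame with yadif and then motion-compensates:
-
yadif=1,mcdeint=mode=fast:parity=tff
-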
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
-
-
38.58 mp# TOC
-
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
-The parameter syntax and behavior for the listed filters are the same
-as those of the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
38.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
-
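-
-An illustrative sketch using the wrapped eq2 filter (the parameter values
-here are only placeholders, not recommendations):
-
mp=eq2=1.0:2:0.5
-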
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
38.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previously dropped frames in sequence.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
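-
-An illustrative command-line sketch (file names are placeholders): drop
-near-duplicate frames and keep the output at a variable frame rate so the
-dropped frames are not duplicated back by the muxer:
-
ffmpeg -i input.mp4 -vf mpdecimate -vsync vfr output.mp4
-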
-
-
-
38.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer as input; if non-zero it negates the
-alpha component (if available). The default value is 0.
-
-
-
38.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
38.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
38.62 noise# TOC
-
-
Add noise to the input video frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for a specific pixel component, or for all pixel components in
-the case of all_seed . Default value is 123457.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for a specific pixel component, or for all pixel components
-in the case of all_strength . Default value is 0. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags, or set flags for all components if all_flags is used.
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
38.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
38.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
38.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
38.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file are used instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
38.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
38.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth.
-
-
-
38.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of the input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them begin in the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together more overlays, but you should test the
-efficiency of such an approach.
-
-
-
38.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
38.65.2 Examples# TOC
-
-
-
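- As an illustrative sketch based on the options above, overlay the second
-input in the bottom-right corner of the main input, keeping a 10 pixel margin:
-
overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10
-
- The same, but terminate the output when the shortest input terminates:
-
overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10:shortest=1
-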
-
-
38.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
-
-
-
38.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
38.67.1 Examples# TOC
-
-
-
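- As an illustrative sketch, pad the input so that the final width/height
-ratio becomes 16:9, centering the original picture in the padded area:
-
pad="max(iw\,ih*(16/9)):ow/(16/9):(ow-iw)/2:(oh-ih)/2"
-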
-
-
38.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H, with which the perspective will remain unchanged.
-If the sense option is set to source, then the specified points will be sent
-to the corners of the destination. If the sense option is set to destination,
-then the corners of the source will be sent to the specified coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
-
-
-
38.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
38.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
38.71 pp# TOC
-
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1) filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1) filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1) filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a)
-
-
-
-
-
38.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
pp=hb/vb/dr/al
-
- Apply default filters without brightness/contrast correction:
-
pp=de/-al
-
- Apply default filters and temporal denoiser:
-
pp=default/tmpnoise|1|2|3
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
pp=hb|y/vb|a
-
-
-
-
38.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp=6 with a 7-point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
-
-
-
38.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes two input videos; the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file written when stats_file is specified contains a sequence of
-key/value pairs of the form key :value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
38.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup, use fps=24000/1001 if the input frame rate is 29.97fps,
-fps=24 for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use a chroma plane instead of the default luma plane
-for the filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
38.75 qp# TOC
-
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
38.75.1 Examples# TOC
-
-
- Some equation like:
-
-
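-
-For instance, an illustrative sketch using the constants above:
-
qp=2+2*sin(PI*qp)
-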
-
-
-
38.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then to use a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
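-
-A minimal usage sketch (the bitmap file name is only a placeholder):
-
removelogo=filename=logo_mask.png
-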
-
-
38.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
38.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
rotate=PI/6
-
- Rotate the input by PI/6 radians counter-clockwise:
-
rotate=-PI/6
-
- Rotate the input by 45 degrees clockwise:
-
rotate=45*PI/180
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
rotate=PI/3+2*PI*t/T
-
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
38.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
38.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
-
38.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
38.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is different from specifying -1 for w
-or h ; you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar
.
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
38.79.2 Examples# TOC
-
-
-
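- A couple of sketches based on the constants listed above. Scale the input to
-half its size:
-
scale=w=iw/2:h=ih/2
-
- Scale the width to 1280 and pick a height that preserves the aspect ratio,
-rounded so it stays divisible by 2:
-
scale=1280:-2
-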
-
-
38.80 separatefields# TOC
-
-
The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half-height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses the field-dominance information in the frame to decide which
-of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields filter.
-
-
-
38.81 setdar, setsar# TOC
-
-
The setdar
filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar
filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar only), sar (setsar only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, the value "0" is assumed.
-In case the form "num :den " is used, the : character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
38.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-setdar=dar=1.77777
-
-
- To change the sample aspect ratio to 10:11, specify:
-
setsar=sar=10/11
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
38.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder or yadif ).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
38.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType enum and of
-the av_get_picture_type_char function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
-
38.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
38.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specifies an additional form of image analysis.
-out outputs the video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outlier pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition . Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
38.85.1 Examples# TOC
-
-
-
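- As an illustrative sketch (the input file name is a placeholder), highlight
-pixels that fall outside of the broadcast range in red during playback:
-
ffplay example.mov -vf signalstats="out=brng:color=red"
-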
-
-
38.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
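-
-An illustrative sketch (values chosen only as an example, within the
-documented ranges): apply a mild blur that mostly spares edges:
-
smartblur=lr=1.5:ls=0.6:lt=10
-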
-
-
38.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filters accept the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
38.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
stereo3d=sbsl:aybd
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
stereo3d=abl:sbsr
-
-
-
-
38.88 spp# TOC
-
-
Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or - in the case of quality level 6 - all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0, the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1. Using this
-option may cause flicker since the B-Frames often have larger QP. Default is
-0 (not enabled).
-
-
-
-
-
38.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
subtitles=sub.srt
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
subtitles=video.mkv
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
38.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
38.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
38.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-
-The default value is top.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
-
-
38.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100.
-
-
-
-
Since the filter keeps track of the whole frames sequence, a bigger n
-value will result in a higher memory usage, so a high value is not recommended.
-
-
-
38.93.1 Examples# TOC
-
-
- Extract one picture every 50 frames:
-
thumbnail=50
-
- Complete example of a thumbnail creation with ffmpeg:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
38.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w x h. The default value is 0, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
38.94.1 Examples# TOC
-
-
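As an illustrative sketch (file names are placeholders), produce a single 3x2 mosaic image from the first frames of a clip, with padding and an outer margin:
-
-
ffmpeg -i in.avi -frames:v 1 -vf tile=layout=3x2:padding=7:margin=2 mosaic.png
-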
-
-
-
38.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
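For example, to interlace a progressive source while enabling the vertical low-pass filter (an illustrative command; the file names are placeholders):
-
-
ffmpeg -i progressive.mp4 -vf tinterlace=mode=interleave_top:flags=low_pass_filter interlaced.mkv
-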
-
-
-
38.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated; the passthrough option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
-
transpose=1:portrait
-
-
38.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
-
-
-
-
-
38.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
38.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
-
unsharp=7:7:-2:7:7:-2
-
-
-
-
38.99 uspp# TOC
-
-
Apply an ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or, in the case of quality level 8, all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
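As an illustrative sketch (file names are placeholders), apply the filter at a higher quality level with a forced quantization parameter:
-
-
ffmpeg -i input.mp4 -vf uspp=quality=6:qp=10 output.mp4
-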
-
-
38.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
38.100.1 Examples# TOC
-
-
- Use default values:
-
-
vidstabdetect
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
-
- Analyze a video with medium shakiness using ffmpeg
:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
38.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
-
38.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smoothen the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0. Default value is 0.
-
-Use also the tripod option of vidstabdetect.
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
38.101.2 Examples# TOC
-
-
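A typical second pass, sketched here for illustration with placeholder file names, applies the transforms recorded by vidstabdetect and follows them with unsharp:
-
-
ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
-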
-
-
-
38.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
38.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2] range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2"
and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1.
-
-
-
-
-
38.103.1 Expressions# TOC
-
-
The alpha , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
38.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
-
vignette=PI/4
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
38.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so called "simple":
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following values:
-
-
-‘all ’
-Deinterlace all frames,
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
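For example, to deinterlace only the frames marked as interlaced, using the complex coefficient set (an illustrative command; the file names are placeholders):
-
-
ffmpeg -i interlaced.ts -vf w3fdif=filter=complex:deint=interlaced progressive.mp4
-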
-
-
-
38.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for 3xBR and 4 for 4xBR.
-Default is 3.
-
-
-
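For example, to enlarge a pixel-art image by a factor of 4 (an illustrative command; the file names are placeholders):
-
-
ffmpeg -i pixel_art.png -vf xbr=n=4 pixel_art_4x.png
-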
-
-
38.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame
, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field
, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame
.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all
.
-
-
-
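For example, to deinterlace a clip at field rate while auto-detecting the field parity (an illustrative command; the file names are placeholders):
-
-
ffmpeg -i interlaced.ts -vf yadif=mode=send_field:parity=auto output.mp4
-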
-
-
38.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets how many output frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of last output frame of previous input frame or 0 when there was
-not yet such frame (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-number of output frames created for previous input frame
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
38.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
39 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
39.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width:height:pix_fmt:time_base.num:time_base.den:pixel_aspect.num:pixel_aspect.den[:sws_param]
-
-
-
39.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename , and pattern options. If such options are
-not specified an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
39.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
39.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
-
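For example, to preview the source with ffplay (an illustrative command, with arbitrarily chosen size and rate):
-
-
ffplay -f lavfi mandelbrot=s=640x480:rate=25
-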
-
39.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
mptestsrc=t=dc_luma
-
will generate a "dc_luma" test pattern.
-
-
-
39.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name to the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
39.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS /BNB ",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bits integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, the low order bits specify
-the rule for "borning" new cells. Higher order bits encode for an
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9 specifies a stay alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 alive neighbor
-cells, and will give birth to a new cell if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
39.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
-
- Full example with slow death effect (mold) using ffplay
:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
39.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
The color source provides a uniformly colored input.
-
-
The haldclutsrc
source provides an identity Hald CLUT. See also
-haldclut filter.
-
-
The nullsrc
source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
The rgbtestsrc
source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
The smptebars
source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
The smptehdbars
source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
The testsrc
source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc
can be used. The
-following command generates noise in the luminance plane by employing
-the geq
filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
-
-
39.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
-
-
40 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
40.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
40.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
41 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
41.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
The filter is used to measure the difference between channels of a stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight (or nearly straight) horizontal line appears, this
-indicates that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40, 160 and 80.
-Allowed range is [0, 255].
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15, 10 and 5.
-Allowed range is [0, 255].
-
-
-zoom
-Set the zoom factor. Default value is 1. Allowed range is [1, 10].
-
-
-
-
-
41.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
41.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
41.2.1 Examples# TOC
-
-
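As an illustrative sketch (file names are placeholders), concatenate an opening, an episode and an ending, each segment providing one video and one audio stream:
-
-
ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
  '[0:0] [0:1] [1:0] [1:1] [2:0] [2:1] concat=n=3:v=1:a=1 [v] [a]' \
  -map '[v]' -map '[a]' output.mkv
-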
-
-
-
41.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M), Short-term loudness (S),
-Integrated loudness (I) and Loudness Range (LRA).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is 0.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9. Common values are 9 and
-18, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value within this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
-
-Default is 0.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the highest sample value. It logs a message
-for sample-peak (identified by SPK).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak
-(identified by TPK) and true-peak per frame (identified by FTPK).
-This mode requires a build with libswresample.
-
-
-
-
-
-
-
-
41.3.1 Examples# TOC
-
-
- Real-time graph using ffplay
, with a EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg
:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
41.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave
works with video inputs, ainterleave
with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-
In order to submit one frame to output, these filters need to enqueue
-at least one frame for each input, so they cannot work in case one
-input is not yet terminated and will not receive incoming frames.
-
-
For example, consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to output until that input sends an end-of-stream signal.
-
-
Also, depending on inputs synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
41.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg
:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
41.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
These filters are mainly aimed at developers to test direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode, must be an integer included between
-0
and UINT32_MAX
. If not specified, or if explicitly set to
--1
, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
-
-
41.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1
, assuming that the input index starts from 0.
-
-For example a value of 1.2 corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
41.6.1 Examples# TOC
-
-
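For example, to keep only the I-frames of a clip (an illustrative command; the file names are placeholders, and the comma inside the expression must be escaped on the command line):
-
-
ffmpeg -i input.avi -vf select='eq(pict_type\,I)' -vsync vfr keyframes%03d.png
-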
-
-
-
41.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd
must be inserted between two video filters,
-asendcmd
must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
41.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
START[-END] COMMANDS;
-
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater or equal to START and is lesser than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter]
is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespaces, or
-sequences of characters starting with #
until the end of line,
-are ignored and can be used to annotate comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
41.7.2 Examples# TOC
-
-
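As an illustrative sketch, send a tempo change to an atempo filter instance when the stream reaches second 4:
-
-
asendcmd=c='4.0 atempo tempo 1.5',atempo
-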
-
-
-
41.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts
works on video frames, asetpts
on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
41.8.1 Examples# TOC
-
-
- Start counting PTS from zero
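-A typical expression for this is:
-
-setpts=PTS-STARTPTS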
-
-
- Apply fast motion effect:
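-For instance, halving the timestamps:
-
-setpts=0.5*PTS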
-
-
- Apply slow motion effect:
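-For instance, doubling the timestamps:
-
-setpts=2.0*PTS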
-
-
- Set fixed rate of 25 frames per second:
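-A typical expression, using the N and TB constants above, is:
-
-setpts=N/(25*TB)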
-
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
-
- Apply an offset of 10 seconds to the input PTS:
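-A typical expression for this is:
-
-setpts=PTS+10/TB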
-
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
-
- Generate timestamps by counting samples:
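-A typical expression, using the N, SR and TB constants above, is:
-
-asetpts=N/SR/TB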
-
-
-
-
-
-
41.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
41.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
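-A likely expression for this is:
-
-settb=expr=1/25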
-
-
- Set the timebase to 1/10:
-
-
- Set the timebase to 1001/1000:
-
-
- Set the timebase to 2*intb:
-
-
- Set the default timebase value:
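-A likely expression for this is:
-
-settb=AVTB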
-
-
-
-
-
41.10 showcqt# TOC
-
Convert input audio to a video output representing the
-frequency spectrum logarithmically (using the constant Q transform with the
-Brown-Puckette algorithm), with a musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc).
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies there is a trade-off between
-accuracy in the time domain and accuracy in the frequency domain. If timeclamp is lower,
-events in the time domain are represented more accurately (such as a fast bass drum);
-otherwise events in the frequency domain are represented more accurately
-(such as a bass guitar). The acceptable range is [0.1, 1.0]. Default value is 0.17.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, the transform is
-more accurate; otherwise the transform is faster. The acceptable range is [0.1, 10.0].
-Default value is 1.0.
-
-
-gamma
-Specify gamma. A lower gamma makes the spectrum more contrasty; a higher gamma
-gives the spectrum more range. The acceptable range is [1.0, 7.0].
-Default value is 3.0.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify the font color expression. This is an arithmetic expression that should return
-an integer value 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-MIDI number of frequency f; some MIDI numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25.
-
-
-count
-Specify the number of transforms per frame, so there are fps*count transforms
-per second. Note that the audio data rate must be divisible by fps*count.
-Default value is 6.
-
-
-
-
-
-
41.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
41.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set the saturation modifier for displayed colors. Negative values provide an
-alternative color scheme. 0 means no saturation at all.
-Saturation must be in the [-10.0, 10.0] range.
-Default value is 1.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No sample pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
41.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
-
-
-
41.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
41.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
-
-
-
41.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit works with audio input, split with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
41.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
-
-
-
41.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq and azmq work as pass-through filters. zmq
-must be inserted between two video filters, azmq between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq and azmq filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
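-(presumably the same shape as the sendcmd commands described earlier)
-
-TARGET COMMAND [ARG ]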
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
41.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
42 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
42.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
42.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod, so the numerical value may be suffixed by an SI
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
42.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
-
-
-
-
-
43 See Also# TOC
-
-
ffmpeg
-ffplay , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
44 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-bitstream-filters.html b/Externals/ffmpeg/shared/doc/ffmpeg-bitstream-filters.html
deleted file mode 100644
index b7195b944f..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-bitstream-filters.html
+++ /dev/null
@@ -1,261 +0,0 @@
-
-
-
-
-
-
- FFmpeg Bitstream Filters Documentation
-
-
-
-
-
-
-
-
- FFmpeg Bitstream Filters Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the bitstream filters provided by the
-libavcodec library.
-
-
A bitstream filter operates on the encoded stream data, and performs
-bitstream level modifications without performing decoding.
-
-
-
-
2 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF.
-
-
The option -bsfs of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
2.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
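-A sketch of such a command (file names assumed) might be:
-
-ffmpeg -i input.ts -c copy -bsf:a aac_adtstoasc output.mp4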
-
-
2.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
2.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified, ‘k ’ is assumed.
-
-
For example, the following ffmpeg command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264 encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
2.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example, to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
2.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
2.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
2.7 mjpega_dump_header# TOC
-
-
-
2.8 movsub# TOC
-
-
-
2.9 mp3_header_decompress# TOC
-
-
-
2.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeric string whose value is related to how often output bytes will
-be modified. Values less than or equal to 0 are forbidden; the lower the
-value, the more frequently bytes will be modified, with 1 meaning
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
2.11 remove_extra# TOC
-
-
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavcodec
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-codecs.html b/Externals/ffmpeg/shared/doc/ffmpeg-codecs.html
deleted file mode 100644
index 968b12f421..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-codecs.html
+++ /dev/null
@@ -1,4474 +0,0 @@
-
-
-
-
-
-
- FFmpeg Codecs Documentation
-
-
-
-
-
-
-
-
- FFmpeg Codecs Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the codecs (decoders and encoders) provided by
-the libavcodec library.
-
-
-
-
2 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext options or using the libavutil/opt.h API
-for programmatic use.
-
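-For instance, a few of the generic options listed below (b, g, bf) could be
-set on the ffmpeg command line like this (file names assumed):
-
-ffmpeg -i INPUT -c:v mpeg4 -b:v 1M -g 250 -bf 2 OUTPUT.avi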
-
The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vectors per macroblock (mpeg4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H.263 advanced intra coding / MPEG-4 AC prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be between -1 and
-69; default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be between -1 and
-1024; default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not auto-detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in broken Microsoft decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, stricter version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec, no matter what the consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non-standardized experimental things: experimental
-(unfinished/work-in-progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk; do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non-compliances as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode; it is of little use otherwise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas more strongly than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas more strongly than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set the limit of the motion vector range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non-spec-compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identically to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames except keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non drop frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
3 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib option. You can list all
-available decoders using the configure option --list-decoders.
-
-
You can disable all the decoders with the configure option
---disable-decoders and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER /
---disable-decoder=DECODER.
-
-
The option -decoders of the ff* tools will display the list of
-enabled decoders.
-
-
-
-
4 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
4.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
4.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
-
-
-
-
-
5 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
5.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
-
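-As an illustrative sketch, the decoder option can be given before the input
-it applies to (file names assumed):
-
-ffmpeg -drc_scale 0.5 -i input.ac3 output.wav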
-
-
-
-
5.2 ffwavesynth# TOC
-
-
Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
5.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt.
-
-
-
5.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
5.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc.
-
-
-
5.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
5.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
5.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
5.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
6 Subtitles Decoders# TOC
-
-
-
6.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
6.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0.
-
-
-
-
-
6.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi.
-
-
-
6.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text. You should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext-based subtitles if
-your application can handle simple text based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext-based subtitles where empty spaces may
-be present at the start or at the end of the lines, or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
-
-
-
7 Encoders# TOC
-
-
Encoders are configured elements in FFmpeg which allow the encoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native encoders
-are enabled by default. Encoders requiring an external library must be enabled
-manually via the corresponding --enable-lib option. You can list all
-available encoders using the configure option --list-encoders.
-
-
You can disable all the encoders with the configure option
---disable-encoders and selectively enable / disable single encoders
-with the options --enable-encoder=ENCODER /
---disable-encoder=ENCODER.
-
-
The option -encoders of the ff* tools will display the list of
-enabled encoders.
-
-
-
-
8 Audio Encoders# TOC
-
-
A description of some of the currently available audio encoders
-follows.
-
-
-
-
-
Advanced Audio Coding (AAC) encoder.
-
-
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
-low complexity (AAC-LC) profile is supported. To use this encoder, you must set
-strict option to ‘experimental ’ or lower.
-
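-An illustrative invocation (file names assumed) enabling the experimental
-encoder at a fixed bitrate:
-
-ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k output.m4a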
-
As this encoder is experimental, unexpected behavior may exist from time to
-time. For a more stable AAC encoder, see libvo-aacenc . However, be warned
-that some users report it has worse quality.
-
-
See also libfdk_aac and libfaac .
-
-
-
8.1.1 Options# TOC
-
-
-b
-Set bit rate in bits/s. Setting this automatically activates constant bit rate
-(CBR) mode.
-
-
-q
-Set quality for variable bit rate (VBR) mode. This option is valid only using
-the ffmpeg command-line tool. For library interface users, use
-global_quality .
-
-
-stereo_mode
-Set stereo encoding mode. Possible values:
-
-
-‘auto ’
-Automatically selected by the encoder.
-
-
-‘ms_off ’
-Disable middle/side encoding. This is the default.
-
-
-‘ms_force ’
-Force middle/side encoding.
-
-
-
-
-aac_coder
-Set AAC encoder coding method. Possible values:
-
-
-‘faac ’
-FAAC-inspired method.
-
-This method is a simplified reimplementation of the method used in FAAC, which
-sets thresholds proportional to the band energies, and then decreases all the
-thresholds with quantizer steps to find the appropriate quantization with
-distortion below threshold band by band.
-
-The quality of this method is comparable to the two loop searching method
-described below, but it is slightly better at the cost of being slower.
-
-
-‘anmr ’
-Average noise to mask ratio (ANMR) trellis-based solution.
-
-This has a theoretic best quality out of all the coding methods, but at the
-cost of the slowest speed.
-
-
-‘twoloop ’
-Two loop searching (TLS) method.
-
-This method first sets quantizers depending on band thresholds and then tries
-to find an optimal combination by adding or subtracting a specific value from
-all quantizers and adjusting some individual quantizer a little.
-
-This method produces similar quality with the FAAC method and is the default.
-
-
-‘fast ’
-Constant quantizer method.
-
-This method sets a constant quantizer for all bands. This is the fastest of all
-the methods, yet produces the worst quality.
-
-
-
-
-
-
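-For example, to encode with this experimental encoder at 128 kbps (file names
-are placeholders), a command like the following can be used:
-
-
ffmpeg -i input.wav -strict experimental -c:a aac -b:a 128k output.m4a
-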
-
-
-
8.2 ac3 and ac3_fixed# TOC
-
-
AC-3 audio encoders.
-
-
These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
The ac3 encoder uses floating-point math, while the ac3_fixed
-encoder only uses fixed-point integer math. This does not mean that one is
-always faster, just that one or the other may be better suited to a
-particular system. The floating-point encoder will generally produce better
-quality audio for a given bitrate. The ac3_fixed encoder is not the
-default codec for any of the output formats, so it must be specified explicitly
-using the option -acodec ac3_fixed
in order to use it.
-
-
-
8.2.1 AC-3 Metadata# TOC
-
-
The AC-3 metadata options are used to set parameters that describe the audio,
-but in most cases do not affect the audio encoding itself. Some of the options
-do directly affect or influence the decoding and playback of the resulting
-bitstream, while others are just for informational purposes. A few of the
-options will add bits to the output stream that could otherwise be used for
-audio data, and will thus affect the quality of the output. Those will be
-indicated accordingly with a note in the option list below.
-
-
These parameters are described in detail in several publicly-available
-documents.
-
-
-
-
8.2.1.1 Metadata Control Options# TOC
-
-
--per_frame_metadata boolean
-Allow Per-Frame Metadata. Specifies if the encoder should check for changing
-metadata for each frame.
-
-0
-The metadata values set at initialization will be used for every frame in the
-stream. (default)
-
-1
-Metadata values can be changed before encoding each frame.
-
-
-
-
-
-
-
-
8.2.1.2 Downmix Levels# TOC
-
-
--center_mixlev level
-Center Mix Level. The amount of gain the decoder should apply to the center
-channel when downmixing to stereo. This field will only be written to the
-bitstream if a center channel is present. The value is specified as a scale
-factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6dB gain
-
-
-
-
--surround_mixlev level
-Surround Mix Level. The amount of gain the decoder should apply to the surround
-channel(s) when downmixing to stereo. This field will only be written to the
-bitstream if one or more surround channels are present. The value is specified
-as a scale factor. There are 3 valid values:
-
-0.707
-Apply -3dB gain
-
-0.500
-Apply -6dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
8.2.1.3 Audio Production Information# TOC
-
Audio Production Information is optional information describing the mixing
-environment. Either none or both of the fields are written to the bitstream.
-
-
--mixing_level number
-Mixing Level. Specifies peak sound pressure level (SPL) in the production
-environment when the mix was mastered. Valid values are 80 to 111, or -1 for
-unknown or not indicated. The default value is -1, but that value cannot be
-used if the Audio Production Information is written to the bitstream. Therefore,
-if the room_type
option is not the default value, the mixing_level
-option must not be -1.
-
-
--room_type type
-Room Type. Describes the equalization used during the final mixing session at
-the studio or on the dubbing stage. A large room is a dubbing stage with the
-industry standard X-curve equalization; a small room has flat equalization.
-This field will not be written to the bitstream if both the mixing_level
-option and the room_type
option have the default values.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-large
-Large Room
-
-2
-small
-Small Room
-
-
-
-
-
-
-
-
8.2.1.4 Other Metadata Options# TOC
-
-
--copyright boolean
-Copyright Indicator. Specifies whether a copyright exists for this audio.
-
-0
-off
-No Copyright Exists (default)
-
-1
-on
-Copyright Exists
-
-
-
-
--dialnorm value
-Dialogue Normalization. Indicates how far the average dialogue level of the
-program is below digital 100% full scale (0 dBFS). This parameter determines a
-level shift during audio reproduction that sets the average volume of the
-dialogue to a preset level. The goal is to match volume level between program
-sources. A value of -31dB will result in no volume level change, relative to
-the source volume, during audio reproduction. Valid values are whole numbers in
-the range -31 to -1, with -31 being the default.
-
-
--dsur_mode mode
-Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround
-(Pro Logic). This field will only be written to the bitstream if the audio
-stream is stereo. Using this option does NOT mean the encoder will actually
-apply Dolby Surround processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Not Dolby Surround Encoded
-
-2
-on
-Dolby Surround Encoded
-
-
-
-
--original boolean
-Original Bit Stream Indicator. Specifies whether this audio is from the
-original source and not a copy.
-
-0
-off
-Not Original Source
-
-1
-on
-Original Source (default)
-
-
-
-
-
-
-
-
8.2.2 Extended Bitstream Information# TOC
-
The extended bitstream options are part of the Alternate Bit Stream Syntax as
-specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts.
-If any one parameter in a group is specified, all values in that group will be
-written to the bitstream. Default values are used for those that are written
-but have not been specified. If the mixing levels are written, the decoder
-will use these values instead of the ones specified in the center_mixlev
-and surround_mixlev
options if it supports the Alternate Bit Stream
-Syntax.
-
-
-
8.2.2.1 Extended Bitstream Information - Part 1# TOC
-
-
--dmix_mode mode
-Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt
-(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-ltrt
-Lt/Rt Downmix Preferred
-
-2
-loro
-Lo/Ro Downmix Preferred
-
-
-
-
--ltrt_cmixlev level
-Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lt/Rt mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--ltrt_surmixlev level
-Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lt/Rt mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
--loro_cmixlev level
-Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the
-center channel when downmixing to stereo in Lo/Ro mode.
-
-1.414
-Apply +3dB gain
-
-1.189
-Apply +1.5dB gain
-
-1.000
-Apply 0dB gain
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain (default)
-
-0.500
-Apply -6.0dB gain
-
-0.000
-Silence Center Channel
-
-
-
-
--loro_surmixlev level
-Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the
-surround channel(s) when downmixing to stereo in Lo/Ro mode.
-
-0.841
-Apply -1.5dB gain
-
-0.707
-Apply -3.0dB gain
-
-0.595
-Apply -4.5dB gain
-
-0.500
-Apply -6.0dB gain (default)
-
-0.000
-Silence Surround Channel(s)
-
-
-
-
-
-
-
-
8.2.2.2 Extended Bitstream Information - Part 2# TOC
-
-
--dsurex_mode mode
-Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX
-(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually
-apply Dolby Surround EX processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Dolby Surround EX Off
-
-2
-on
-Dolby Surround EX On
-
-
-
-
--dheadphone_mode mode
-Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone
-encoding (multi-channel matrixed to 2.0 for use with headphones). Using this
-option does NOT mean the encoder will actually apply Dolby Headphone
-processing.
-
-0
-notindicated
-Not Indicated (default)
-
-1
-off
-Dolby Headphone Off
-
-2
-on
-Dolby Headphone On
-
-
-
-
--ad_conv_type type
-A/D Converter Type. Indicates whether the audio has passed through HDCD A/D
-conversion.
-
-0
-standard
-Standard A/D Converter (default)
-
-1
-hdcd
-HDCD A/D Converter
-
-
-
-
-
-
-
-
8.2.3 Other AC-3 Encoding Options# TOC
-
-
--stereo_rematrixing boolean
-Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
-is an optional AC-3 feature that increases quality by selectively encoding
-the left/right channels as mid/side. This option is enabled by default, and it
-is highly recommended that it be left as enabled except for testing purposes.
-
-
-
-
-
-
8.2.4 Floating-Point-Only AC-3 Encoding Options# TOC
-
-
These options are only valid for the floating-point encoder and do not exist
-for the fixed-point encoder due to the corresponding features not being
-implemented in fixed-point.
-
-
--channel_coupling boolean
-Enables/Disables use of channel coupling, which is an optional AC-3 feature
-that increases quality by combining high frequency information from multiple
-channels into a single channel. The per-channel high frequency information is
-sent with less accuracy in both the frequency and time domains. This allows
-more bits to be used for lower frequencies while preserving enough information
-to reconstruct the high frequencies. This option is enabled by default for the
-floating-point encoder and should generally be left as enabled except for
-testing purposes or to increase encoding speed.
-
--1
-auto
-Selected by Encoder (default)
-
-0
-off
-Disable Channel Coupling
-
-1
-on
-Enable Channel Coupling
-
-
-
-
--cpl_start_band number
-Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a
-value higher than the bandwidth is used, it will be reduced to 1 less than the
-coupling end band. If auto is used, the start band will be determined by
-the encoder based on the bit rate, sample rate, and channel layout. This option
-has no effect if channel coupling is disabled.
-
--1
-auto
-Selected by Encoder (default)
-
-
-
-
-
-
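-As an illustration of the options above (file names are placeholders), a stereo
-input could be encoded to AC-3 while flagging it as Dolby Surround encoded:
-
-
ffmpeg -i input.wav -c:a ac3 -b:a 192k -dsur_mode on output.ac3
-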
-
-
8.3 libfaac# TOC
-
-
libfaac AAC (Advanced Audio Coding) encoder wrapper.
-
-
Requires the presence of the libfaac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfaac --enable-nonfree
.
-
-
This encoder is considered to be of higher quality with respect to
-the native experimental FFmpeg AAC encoder .
-
-
For more information see the libfaac project at
-http://www.audiocoding.com/faac.html/ .
-
-
-
8.3.1 Options# TOC
-
-
The following shared FFmpeg codec options are recognized.
-
-
The following options are supported by the libfaac wrapper. The
-faac
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
-is not explicitly specified, it is automatically set to a suitable
-value depending on the selected profile. faac
bitrate is
-expressed in kilobits/s.
-
-Note that libfaac does not support CBR (Constant Bit Rate) but only
-ABR (Average Bit Rate).
-
-If VBR mode is enabled this option is ignored.
-
-
-ar (-R )
-Set audio sampling rate (in Hz).
-
-
-ac (-c )
-Set the number of audio channels.
-
-
-cutoff (-C )
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_main ’
-Main AAC (Main)
-
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_ssr ’
-Scalable Sample Rate (SSR)
-
-
-‘aac_ltp ’
-Long Term Prediction (LTP)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-flags +qscale
-Set constant quality VBR (Variable Bit Rate) mode.
-
-
-global_quality
-Set quality in VBR mode as an integer number of lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-and used to set the quality value used by libfaac. A reasonable range
-for the option value in QP units is [10-500], the higher the value the
-higher the quality.
-
-
-q (-q )
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value sets the quality value used by libfaac. A reasonable range
-for the option value is [10-500], the higher the value the higher the
-quality.
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
8.3.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
-
-
- Use ffmpeg
to convert an audio file to VBR AAC, using the
-LTP AAC profile:
-
-
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
-
-
-
-
-
8.4 libfdk_aac# TOC
-
-
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
-
-
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
-the Android project.
-
-
Requires the presence of the libfdk-aac headers and library during
-configuration. You need to explicitly configure the build with
---enable-libfdk-aac
. The library is also incompatible with GPL,
-so if you allow the use of GPL, you should configure with
---enable-gpl --enable-nonfree --enable-libfdk-aac
.
-
-
This encoder is considered to be of higher quality with respect to
-both the native experimental FFmpeg AAC encoder and
-libfaac .
-
-
VBR encoding, enabled through the vbr or flags
-+qscale options, is experimental and only works with some
-combinations of parameters.
-
-
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
-higher.
-
-
For more information see the fdk-aac project at
-http://sourceforge.net/p/opencore-amr/fdk-aac/ .
-
-
-
8.4.1 Options# TOC
-
-
The following options are mapped on the shared FFmpeg codec options.
-
-
-b
-Set bit rate in bits/s. If the bitrate is not explicitly specified, it
-is automatically set to a suitable value depending on the selected
-profile.
-
-In case VBR mode is enabled the option is ignored.
-
-
-ar
-Set audio sampling rate (in Hz).
-
-
-channels
-Set the number of audio channels.
-
-
-flags +qscale
-Enable fixed quality, VBR (Variable Bit Rate) mode.
-Note that VBR is implicitly enabled when the vbr value is
-positive.
-
-
-cutoff
-Set cutoff frequency. If not specified (or explicitly set to 0) it
-will use a value automatically computed by the library. Default value
-is 0.
-
-
-profile
-Set audio profile.
-
-The following profiles are recognized:
-
-‘aac_low ’
-Low Complexity AAC (LC)
-
-
-‘aac_he ’
-High Efficiency AAC (HE-AAC)
-
-
-‘aac_he_v2 ’
-High Efficiency AAC version 2 (HE-AACv2)
-
-
-‘aac_ld ’
-Low Delay AAC (LD)
-
-
-‘aac_eld ’
-Enhanced Low Delay AAC (ELD)
-
-
-
-If not specified it is set to ‘aac_low ’.
-
-
-
-
The following are private options of the libfdk_aac encoder.
-
-
-afterburner
-Enable afterburner feature if set to 1, disabled if set to 0. This
-improves the quality but also increases the required processing power.
-
-Default value is 1.
-
-
-eld_sbr
-Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
-if set to 0.
-
-Default value is 0.
-
-
-signaling
-Set SBR/PS signaling style.
-
-It can assume one of the following values:
-
-‘default ’
-choose signaling implicitly (explicit hierarchical by default,
-implicit if global header is disabled)
-
-
-‘implicit ’
-implicit backwards compatible signaling
-
-
-‘explicit_sbr ’
-explicit SBR, implicit PS signaling
-
-
-‘explicit_hierarchical ’
-explicit hierarchical signaling
-
-
-
-Default value is ‘default ’.
-
-
-latm
-Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
-
-Default value is 0.
-
-
-header_period
-Set StreamMuxConfig and PCE repetition period (in frames) for sending
-in-band configuration buffers within LATM/LOAS transport layer.
-
-Must be a 16-bit non-negative integer.
-
-Default value is 0.
-
-
-vbr
-Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
-good) and 5 is highest quality. A value of 0 disables VBR and enables CBR
-(Constant Bit Rate) encoding.
-
-Currently only the ‘aac_low ’ profile supports VBR encoding.
-
-VBR modes 1-5 correspond to roughly the following average bit rates:
-
-
-‘1 ’
-32 kbps/channel
-
-‘2 ’
-40 kbps/channel
-
-‘3 ’
-48-56 kbps/channel
-
-‘4 ’
-64 kbps/channel
-
-‘5 ’
-about 80-96 kbps/channel
-
-
-
-Default value is 0.
-
-
-
-
-
8.4.2 Examples# TOC
-
-
- Use ffmpeg
to convert an audio file to VBR AAC in an M4A (MP4)
-container:
-
-
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
-
-
- Use ffmpeg
to convert an audio file to CBR 64 kbps AAC, using the
-High-Efficiency AAC profile:
-
-
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
-
-
-
-
-
8.5 libmp3lame# TOC
-
-
LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper.
-
-
Requires the presence of the libmp3lame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libmp3lame
.
-
-
See libshine for a fixed-point MP3 encoder, although with a
-lower quality.
-
-
-
8.5.1 Options# TOC
-
-
The following options are supported by the libmp3lame wrapper. The
-lame
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate
is
-expressed in kilobits/s.
-
-
-q (-V )
-Set constant quality setting for VBR. This option is valid only
-using the ffmpeg
command-line tool. For library interface
-users, use global_quality .
-
-
-compression_level (-q )
-Set algorithm quality. Valid arguments are integers in the 0-9 range,
-with 0 meaning highest quality but slowest, and 9 meaning fastest
-while producing the worst quality.
-
-
-reservoir
-Enable use of bit reservoir when set to 1. Default value is 1. LAME
-has this enabled by default, but it can be overridden by using the
---nores option.
-
-
-joint_stereo (-m j )
-Enable the encoder to use (on a frame by frame basis) either L/R
-stereo or mid/side stereo. Default value is 1.
-
-
-abr (--abr )
-Enable the encoder to use ABR when set to 1. The lame
---abr option sets the target bitrate directly, while this option only
-tells FFmpeg to use ABR; it still relies on b to set the bitrate.
-
-
-
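-For example, to encode to VBR MP3 at quality level 2 (file names are
-placeholders):
-
-
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
-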
-
-
-
8.6 libopencore-amrnb# TOC
-
-
OpenCORE Adaptive Multi-Rate Narrowband encoder.
-
-
Requires the presence of the libopencore-amrnb headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopencore-amrnb --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
-but you can override it by setting strict to ‘unofficial ’ or
-lower.
-
-
-
8.6.1 Options# TOC
-
-
-b
-Set bitrate in bits per second. Only the following bitrates are supported,
-otherwise libavcodec will round to the nearest valid bitrate.
-
-
-4750
-5150
-5900
-6700
-7400
-7950
-10200
-12200
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
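-For example, resampling to mono 8000 Hz and encoding at one of the supported
-bitrates (file names are placeholders):
-
-
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 7400 output.amr
-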
-
-
-
8.7 libshine# TOC
-
-
Shine Fixed-Point MP3 encoder wrapper.
-
-
Shine is a fixed-point MP3 encoder. It has a far better performance on
-platforms without an FPU, e.g. armel CPUs, and some phones and tablets.
-However, as it is targeted more at performance than quality, it is not on par
-with LAME and other production-grade encoders quality-wise. Also, according to
-the project’s homepage, this encoder may not be free of bugs as the code was
-written a long time ago and the project was dead for at least 5 years.
-
-
This encoder only supports stereo and mono input. This is also CBR-only.
-
-
The original project (last updated in early 2007) is at
-http://sourceforge.net/projects/libshine-fxp/ . We only support the
-updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine .
-
-
Requires the presence of the libshine headers and library during
-configuration. You need to explicitly configure the build with
---enable-libshine
.
-
-
See also libmp3lame .
-
-
-
8.7.1 Options# TOC
-
-
The following options are supported by the libshine wrapper. The
-shineenc
-equivalent of the options are listed in parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. shineenc
-b option
-is expressed in kilobits/s.
-
-
-
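-For example, to encode a CBR 128 kbps MP3 with libshine (file names are
-placeholders):
-
-
ffmpeg -i input.wav -c:a libshine -b:a 128k output.mp3
-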
-
-
-
8.8 libtwolame# TOC
-
-
TwoLAME MP2 encoder wrapper.
-
-
Requires the presence of the libtwolame headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtwolame
.
-
-
-
8.8.1 Options# TOC
-
-
The following options are supported by the libtwolame wrapper. The
-twolame
-equivalent options follow the FFmpeg ones and are in
-parentheses.
-
-
-b (-b )
-Set bitrate expressed in bits/s for CBR. twolame
b
-option is expressed in kilobits/s. Default value is 128k.
-
-
-q (-V )
-Set quality for experimental VBR support. Maximum value range is
-from -50 to 50, useful range is from -10 to 10. The higher the
-value, the better the quality. This option is valid only using the
-ffmpeg
command-line tool. For library interface users,
-use global_quality .
-
-
-mode (--mode )
-Set the mode of the resulting audio. Possible values:
-
-
-‘auto ’
-Choose mode automatically based on the input. This is the default.
-
-‘stereo ’
-Stereo
-
-‘joint_stereo ’
-Joint stereo
-
-‘dual_channel ’
-Dual channel
-
-‘mono ’
-Mono
-
-
-
-
-psymodel (--psyc-mode )
-Set psychoacoustic model to use in encoding. The argument must be
-an integer between -1 and 4, inclusive. The higher the value, the
-better the quality. The default value is 3.
-
-
-energy_levels (--energy )
-Enable energy levels extensions when set to 1. The default value is
-0 (disabled).
-
-
-error_protection (--protect )
-Enable CRC error protection when set to 1. The default value is 0
-(disabled).
-
-
-copyright (--copyright )
-Set MPEG audio copyright flag when set to 1. The default value is 0
-(disabled).
-
-
-original (--original )
-Set MPEG audio original flag when set to 1. The default value is 0
-(disabled).
-
-
-
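-For example, to encode a CBR 192 kbps MP2 (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libtwolame -b:a 192k output.mp2
-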
-
-
-
8.9 libvo-aacenc# TOC
-
-
VisualOn AAC encoder.
-
-
Requires the presence of the libvo-aacenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-aacenc --enable-version3
.
-
-
This encoder is considered to be worse than the
-native experimental FFmpeg AAC encoder , according to
-multiple sources.
-
-
-
8.9.1 Options# TOC
-
-
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
-channels. It is also CBR-only.
-
-
-b
-Set bit rate in bits/s.
-
-
-
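-For example (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a
-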
-
-
-
8.10 libvo-amrwbenc# TOC
-
-
VisualOn Adaptive Multi-Rate Wideband encoder.
-
-
Requires the presence of the libvo-amrwbenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvo-amrwbenc --enable-version3
.
-
-
This is a mono-only encoder. Officially it only supports 16000Hz sample
-rate, but you can override it by setting strict to
-‘unofficial ’ or lower.
-
-
-
8.10.1 Options# TOC
-
-
-b
-Set bitrate in bits/s. Only the following bitrates are supported, otherwise
-libavcodec will round to the nearest valid bitrate.
-
-
-‘6600 ’
-‘8850 ’
-‘12650 ’
-‘14250 ’
-‘15850 ’
-‘18250 ’
-‘19850 ’
-‘23050 ’
-‘23850 ’
-
-
-
-dtx
-Allow discontinuous transmission (generate comfort noise) when set to 1. The
-default value is 0 (disabled).
-
-
-
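-For example, resampling to mono 16000 Hz and encoding at one of the supported
-bitrates (file names are placeholders):
-
-
ffmpeg -i input.wav -ar 16000 -ac 1 -c:a libvo_amrwbenc -b:a 23850 output.amr
-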
-
-
-
8.11 libopus# TOC
-
-
libopus Opus Interactive Audio Codec encoder wrapper.
-
-
Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
-
8.11.1 Option Mapping# TOC
-
-
Most libopus options are modelled after the opusenc
utility from
-opus-tools. The following is an option mapping chart describing options
-supported by the libopus wrapper, and their opusenc
-equivalent
-in parentheses.
-
-
-b (bitrate )
-Set the bit rate in bits/s. FFmpeg’s b option is
-expressed in bits/s, while opusenc
’s bitrate in
-kilobits/s.
-
-
-vbr (vbr , hard-cbr , and cvbr )
-Set VBR mode. The FFmpeg vbr option has the following
-valid arguments, with their opusenc
equivalent options
-in parentheses:
-
-
-‘off (hard-cbr ) ’
-Use constant bit rate encoding.
-
-
-‘on (vbr ) ’
-Use variable bit rate encoding (the default).
-
-
-‘constrained (cvbr ) ’
-Use constrained variable bit rate encoding.
-
-
-
-
-compression_level (comp )
-Set encoding algorithm complexity. Valid options are integers in
-the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
-gives the highest quality but slowest encoding. The default is 10.
-
-
-frame_duration (framesize )
-Set maximum frame size, or duration of a frame in milliseconds. The
-argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller
-frame sizes achieve lower latency but less quality at a given bitrate.
-Sizes greater than 20ms are only interesting at fairly low bitrates.
-The default is 20ms.
-
-
-packet_loss (expect-loss )
-Set expected packet loss percentage. The default is 0.
-
-
-application (N.A.)
-Set intended application type. Valid options are listed below:
-
-
-‘voip ’
-Favor improved speech intelligibility.
-
-‘audio ’
-Favor faithfulness to the input (the default).
-
-‘lowdelay ’
-Restrict to only the lowest delay modes.
-
-
-
-
-cutoff (N.A.)
-Set cutoff bandwidth in Hz. The argument must be exactly one of the
-following: 4000, 6000, 8000, 12000, or 20000, corresponding to
-narrowband, mediumband, wideband, super wideband, and fullband
-respectively. The default is 0 (cutoff disabled).
-
-
-
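-For example, to encode with the default VBR mode at a 96 kbps target (file
-names are placeholders):
-
-
ffmpeg -i input.wav -c:a libopus -b:a 96k -vbr on output.ogg
-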
-
-
-
8.12 libvorbis# TOC
-
-
libvorbis encoder wrapper.
-
-
Requires the presence of the libvorbisenc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libvorbis
.
-
-
-
8.12.1 Options# TOC
-
-
The following options are supported by the libvorbis wrapper. The
-oggenc
-equivalent of the options are listed in parentheses.
-
-
To get a more accurate and extensive documentation of the libvorbis
-options, consult the libvorbisenc’s and oggenc
’s documentation.
-See http://xiph.org/vorbis/ ,
-http://wiki.xiph.org/Vorbis-tools , and oggenc(1).
-
-
-b (-b )
-Set bitrate expressed in bits/s for ABR. oggenc
-b is
-expressed in kilobits/s.
-
-
-q (-q )
-Set constant quality setting for VBR. The value should be a float
-number in the range of -1.0 to 10.0. The higher the value, the better
-the quality. The default value is ‘3.0 ’.
-
-This option is valid only using the ffmpeg
command-line tool.
-For library interface users, use global_quality .
-
-
-cutoff (--advanced-encode-option lowpass_frequency=N )
-Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc
’s
-related option is expressed in kHz. The default value is ‘0 ’ (cutoff
-disabled).
-
-
-minrate (-m )
-Set minimum bitrate expressed in bits/s. oggenc
-m is
-expressed in kilobits/s.
-
-
-maxrate (-M )
-Set maximum bitrate expressed in bits/s. oggenc
-M is
-expressed in kilobits/s. This only has effect on ABR mode.
-
-
-iblock (--advanced-encode-option impulse_noisetune=N )
-Set noise floor bias for impulse blocks. The value is a float number from
--15.0 to 0.0. A negative bias instructs the encoder to pay special attention
-to the crispness of transients in the encoded audio. The tradeoff for better
-transient response is a higher bitrate.
-
-
-
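-For example, to encode at VBR quality 5 (file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libvorbis -q:a 5 output.ogg
-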
-
-
-
8.13 libwavpack# TOC
-
-
A wrapper providing WavPack encoding through libwavpack.
-
-
Only lossless mode using 32-bit integer samples is supported currently.
-
-
Requires the presence of the libwavpack headers and library during
-configuration. You need to explicitly configure the build with
---enable-libwavpack
.
-
-
Note that a libavcodec-native encoder for the WavPack codec exists so users can
-encode audio with this codec without using this encoder. See wavpackenc .
-
-
-
8.13.1 Options# TOC
-
-
wavpack
command line utility’s corresponding options are listed in
-parentheses, if any.
-
-
-frame_size (--blocksize )
-Default is 32768.
-
-
-compression_level
-Set speed vs. compression tradeoff. Acceptable arguments are listed below:
-
-
-‘0 (-f ) ’
-Fast mode.
-
-
-‘1 ’
-Normal (default) settings.
-
-
-‘2 (-h ) ’
-High quality.
-
-
-‘3 (-hh ) ’
-Very high quality.
-
-
-‘4-8 (-hh -x EXTRAPROC ) ’
-Same as ‘3 ’, but with extra processing enabled.
-
-‘4 ’ is the same as -x2 and ‘8 ’ is the same as -x6 .
-
-
-
-
-
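-For example, to encode losslessly at the high-quality compression setting
-(file names are placeholders):
-
-
ffmpeg -i input.wav -c:a libwavpack -compression_level 2 output.wv
-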
-
-
-
8.14 wavpack# TOC
-
-
WavPack lossless audio encoder.
-
-
This is a libavcodec-native WavPack encoder. There is also an encoder based on
-libwavpack, but there is virtually no reason to use that encoder.
-
-
See also libwavpack .
-
-
-
8.14.1 Options# TOC
-
-
The equivalent options for wavpack
command line utility are listed in
-parentheses.
-
-
-
8.14.1.1 Shared options# TOC
-
-
The following shared options are effective for this encoder. Only special notes
-about this particular encoder will be documented here. For the general meaning
-of the options, see the Codec Options chapter .
-
-
-frame_size (--blocksize )
-For this encoder, the range for this option is between 128 and 131072. Default
-is automatically decided based on sample rate and number of channels.
-
-For the complete formula used to calculate the default, see
-libavcodec/wavpackenc.c .
-
-
-compression_level (-f , -h , -hh , and -x )
-This option’s syntax is consistent with libwavpack ’s.
-
-
-
-
-
8.14.1.2 Private options# TOC
-
-
-joint_stereo (-j )
-Set whether to enable joint stereo. Valid values are:
-
-
-‘on (1 ) ’
-Force mid/side audio encoding.
-
-‘off (0 ) ’
-Force left/right audio encoding.
-
-‘auto ’
-Let the encoder decide automatically.
-
-
-
-
-optimize_mono
-Set whether to enable optimization for mono. This option is only effective for
-non-mono streams. Available values:
-
-
-‘on ’
-enabled
-
-‘off ’
-disabled
-
-
-
-
-
-
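-For example, forcing mid/side encoding with the native encoder (file names are
-placeholders):
-
-
ffmpeg -i input.wav -c:a wavpack -joint_stereo on output.wv
-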
-
-
-
9 Video Encoders# TOC
-
-
A description of some of the currently available video encoders
-follows.
-
-
-
9.1 libtheora# TOC
-
-
libtheora Theora encoder wrapper.
-
-
Requires the presence of the libtheora headers and library during
-configuration. You need to explicitly configure the build with
---enable-libtheora
.
-
-
For more information about the libtheora project see
-http://www.theora.org/ .
-
-
-
9.1.1 Options# TOC
-
-
The following global options are mapped to internal libtheora options
-which affect the quality and the bitrate of the encoded stream.
-
-
-b
-Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
-case VBR (Variable Bit Rate) mode is enabled this option is ignored.
-
-
-flags
-Used to enable constant quality mode (VBR) encoding through the
-qscale flag, and to enable the pass1
and pass2
-modes.
-
-
-g
-Set the GOP size.
-
-
-global_quality
-Set the global quality as an integer in lambda units.
-
-Only relevant when VBR mode is enabled with flags +qscale
. The
-value is converted to QP units by dividing it by FF_QP2LAMBDA
,
-clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
-value in the native libtheora range [0-63]. A higher value corresponds
-to a higher quality.
-
-
-q
-Enable VBR mode when set to a non-negative value, and set constant
-quality value as a double floating point value in QP units.
-
-The value is clipped in the [0-10] range, and then multiplied by 6.3
-to get a value in the native libtheora range [0-63].
-
-This option is valid only using the ffmpeg
command-line
-tool. For library interface users, use global_quality .
-
-
-
-
-
9.1.2 Examples# TOC
-
-
- Set maximum constant quality (VBR) encoding with ffmpeg
:
-
-
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
-
-
- Use ffmpeg
to convert a CBR 1000 kbps Theora video stream:
-
-
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
-
-
-
-
-
9.2 libvpx# TOC
-
-
VP8/VP9 format supported through libvpx.
-
-
Requires the presence of the libvpx headers and library during configuration.
-You need to explicitly configure the build with --enable-libvpx
.
-
-
-
9.2.1 Options# TOC
-
-
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
-
-
-threads
-g_threads
-
-
-profile
-g_profile
-
-
-vb
-rc_target_bitrate
-
-
-g
-kf_max_dist
-
-
-keyint_min
-kf_min_dist
-
-
-qmin
-rc_min_quantizer
-
-
-qmax
-rc_max_quantizer
-
-
-bufsize, vb
-rc_buf_sz
-(bufsize * 1000 / vb)
-
-rc_buf_optimal_sz
-(bufsize * 1000 / vb * 5 / 6)
-
-
-rc_init_occupancy, vb
-rc_buf_initial_sz
-(rc_init_occupancy * 1000 / vb)
-
-
-rc_buffer_aggressivity
-rc_undershoot_pct
-
-
-skip_threshold
-rc_dropframe_thresh
-
-
-qcomp
-rc_2pass_vbr_bias_pct
-
-
-maxrate, vb
-rc_2pass_vbr_maxsection_pct
-(maxrate * 100 / vb)
-
-
-minrate, vb
-rc_2pass_vbr_minsection_pct
-(minrate * 100 / vb)
-
-
-minrate, maxrate, vb
-VPX_CBR
-(minrate == maxrate == vb)
-
-
-crf
-VPX_CQ
, VP8E_SET_CQ_LEVEL
-
-
-quality
-
-best
-VPX_DL_BEST_QUALITY
-
-good
-VPX_DL_GOOD_QUALITY
-
-realtime
-VPX_DL_REALTIME
-
-
-
-
-speed
-VP8E_SET_CPUUSED
-
-
-nr
-VP8E_SET_NOISE_SENSITIVITY
-
-
-mb_threshold
-VP8E_SET_STATIC_THRESHOLD
-
-
-slices
-VP8E_SET_TOKEN_PARTITIONS
-
-
-max-intra-rate
-VP8E_SET_MAX_INTRA_BITRATE_PCT
-
-
-force_key_frames
-VPX_EFLAG_FORCE_KF
-
-
-Alternate reference frame related
-
-vp8flags altref
-VP8E_SET_ENABLEAUTOALTREF
-
-arnr_max_frames
-VP8E_SET_ARNR_MAXFRAMES
-
-arnr_type
-VP8E_SET_ARNR_TYPE
-
-arnr_strength
-VP8E_SET_ARNR_STRENGTH
-
-rc_lookahead
-g_lag_in_frames
-
-
-
-
-vp8flags error_resilient
-g_error_resilient
-
-
-aq_mode
-VP9E_SET_AQ_MODE
-
-
-
-
-
For more information about libvpx see:
-http://www.webmproject.org/
-
-
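-For example, a constrained-quality VP8 encode for WebM might look like this
-(file names are placeholders):
-
-
ffmpeg -i input.mp4 -c:v libvpx -b:v 1M -crf 10 -c:a libvorbis output.webm
-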
-
-
9.3 libwebp# TOC
-
-
libwebp WebP Image encoder wrapper
-
-
libwebp is Google’s official encoder for WebP images. It can encode in either
-lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
-frame. Lossless images are a separate codec developed by Google.
-
-
-
9.3.1 Pixel Format# TOC
-
-
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
-to limitations of the format and libwebp. Alpha is supported for either mode.
-Because of API limitations, if RGB is passed in when encoding lossy or YUV is
-passed in for encoding lossless, the pixel format will automatically be
-converted using functions from libwebp. This is not ideal and is done only for
-convenience.
-
-
-
9.3.2 Options# TOC
-
-
--lossless boolean
-Enables/Disables use of lossless mode. Default is 0.
-
-
--compression_level integer
-For lossy, this is a quality/speed tradeoff. Higher values give better quality
-for a given size at the cost of increased encoding time. For lossless, this is
-a size/speed tradeoff. Higher values give smaller size at the cost of increased
-encoding time. More specifically, it controls the number of extra algorithms
-and compression tools used, and varies the combination of these tools. This
-maps to the method option in libwebp. The valid range is 0 to 6.
-Default is 4.
-
-
--qscale float
-For lossy encoding, this controls image quality, 0 to 100. For lossless
-encoding, this controls the effort and time spent at compressing more. The
-default value is 75. Note that for usage via libavcodec, this option is called
-global_quality and must be multiplied by FF_QP2LAMBDA .
-
-
--preset type
-Configuration preset. This does some automatic settings based on the general
-type of the image.
-
-none
-Do not use a preset.
-
-default
-Use the encoder default.
-
-picture
-Digital picture, like portrait, inner shot
-
-photo
-Outdoor photograph, with natural lighting
-
-drawing
-Hand or line drawing, with high-contrast details
-
-icon
-Small-sized colorful images
-
-text
-Text-like
-
-
-
-
-
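-For example, to produce a lossless WebP image (file names are placeholders):
-
-
ffmpeg -i input.png -c:v libwebp -lossless 1 -compression_level 6 output.webp
-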
-
-
-
9.4 libx264, libx264rgb# TOC
-
-
x264 H.264/MPEG-4 AVC encoder wrapper.
-
-
This encoder requires the presence of the libx264 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx264
.
-
-
libx264 supports an impressive number of features, including 8x8 and
-4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
-entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
-for detail retention (adaptive quantization, psy-RD, psy-trellis).
-
-
Many libx264 encoder options are mapped to FFmpeg global codec
-options, while unique encoder options are provided through private
-options. Additionally the x264opts and x264-params
-private options allow one to pass a list of key=value tuples as accepted
-by the libx264 x264_param_parse
function.
-
-
The x264 project website is at
-http://www.videolan.org/developers/x264.html .
-
-
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
-pixel formats as input instead of YUV.
-
-
-
9.4.1 Supported Pixel Formats# TOC
-
-
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
-x264’s configure time. FFmpeg only supports one bit depth in one particular
-build. In other words, it is not possible to build one FFmpeg with multiple
-versions of x264 with different bit depths.
-
-
-
9.4.2 Options# TOC
-
-
The following options are supported by the libx264 wrapper. The
-x264
-equivalent options or values are listed in parentheses
-for easy migration.
-
-
To reduce the duplication of documentation, only the private options
-and some others requiring special attention are documented here. For
-the documentation of the undocumented generic options, see
-the Codec Options chapter .
-
-
To get a more accurate and extensive documentation of the libx264
-options, invoke the command x264 --full-help
or consult
-the libx264 documentation.
-
-
-b (bitrate )
-Set bitrate in bits/s. Note that FFmpeg’s b option is
-expressed in bits/s, while x264
’s bitrate is in
-kilobits/s.
-
-
-bf (bframes )
-g (keyint )
-qmin (qpmin )
-Minimum quantizer scale.
-
-
-qmax (qpmax )
-Maximum quantizer scale.
-
-
-qdiff (qpstep )
-Maximum difference between quantizer scales.
-
-
-qblur (qblur )
-Quantizer curve blur
-
-
-qcomp (qcomp )
-Quantizer curve compression factor
-
-
-refs (ref )
-Number of reference frames each P-frame can use. The range is from 0-16 .
-
-
-sc_threshold (scenecut )
-Sets the threshold for the scene change detection.
-
-
-trellis (trellis )
-Performs Trellis quantization to increase efficiency. Enabled by default.
-
-
-nr (nr )
-me_range (merange )
-Maximum range of the motion search in pixels.
-
-
-me_method (me )
-Set motion estimation method. Possible values in the decreasing order
-of speed:
-
-
-‘dia (dia ) ’
-‘epzs (dia ) ’
-Diamond search with radius 1 (fastest). ‘epzs ’ is an alias for
-‘dia ’.
-
-‘hex (hex ) ’
-Hexagonal search with radius 2.
-
-‘umh (umh ) ’
-Uneven multi-hexagon search.
-
-‘esa (esa ) ’
-Exhaustive search.
-
-‘tesa (tesa ) ’
-Hadamard exhaustive search (slowest).
-
-
-
-
-subq (subme )
-Sub-pixel motion estimation method.
-
-
-b_strategy (b-adapt )
-Adaptive B-frame placement decision algorithm. Use only on first-pass.
-
-
-keyint_min (min-keyint )
-Minimum GOP size.
-
-
-coder
-Set entropy encoder. Possible values:
-
-
-‘ac ’
-Enable CABAC.
-
-
-‘vlc ’
-Enable CAVLC and disable CABAC. It generates the same effect as
-x264
’s --no-cabac option.
-
-
-
-
-cmp
-Set full pixel motion estimation comparison algorithm. Possible values:
-
-
-‘chroma ’
-Enable chroma in motion estimation.
-
-
-‘sad ’
-Ignore chroma in motion estimation. It generates the same effect as
-x264
’s --no-chroma-me option.
-
-
-
-
-threads (threads )
-Number of encoding threads.
-
-
-thread_type
-Set multithreading technique. Possible values:
-
-
-‘slice ’
-Slice-based multithreading. It generates the same effect as
-x264
’s --sliced-threads option.
-
-‘frame ’
-Frame-based multithreading.
-
-
-
-
-flags
-Set encoding flags. It can be used to disable closed GOP and enable
-open GOP by setting it to -cgop
. The result is similar to
-the behavior of x264
’s --open-gop option.
-
-
-rc_init_occupancy (vbv-init )
-preset (preset )
-Set the encoding preset.
-
-
-tune (tune )
-Set tuning of the encoding params.
-
-
-profile (profile )
-Set profile restrictions.
-
-
-fastfirstpass
-Enable fast settings when encoding first pass, when set to 1. When set
-to 0, it has the same effect as x264
’s
---slow-firstpass option.
-
-
-crf (crf )
-Set the quality for constant quality mode.
-
-
-crf_max (crf-max )
-In CRF mode, prevents VBV from lowering quality beyond this point.
-
-
-qp (qp )
-Set constant quantization rate control method parameter.
-
-
-aq-mode (aq-mode )
-Set AQ method. Possible values:
-
-
-‘none (0 ) ’
-Disabled.
-
-
-‘variance (1 ) ’
-Variance AQ (complexity mask).
-
-
-‘autovariance (2 ) ’
-Auto-variance AQ (experimental).
-
-
-
-
-aq-strength (aq-strength )
-Set AQ strength, reduce blocking and blurring in flat and textured areas.
-
-
-psy
-Use psychovisual optimizations when set to 1. When set to 0, it has the
-same effect as x264
’s --no-psy option.
-
-
-psy-rd (psy-rd )
-Set strength of psychovisual optimization, in
-psy-rd :psy-trellis format.
-
-
-rc-lookahead (rc-lookahead )
-Set number of frames to look ahead for frametype and ratecontrol.
-
-
-weightb
-Enable weighted prediction for B-frames when set to 1. When set to 0,
-it has the same effect as x264
’s --no-weightb option.
-
-
-weightp (weightp )
-Set weighted prediction method for P-frames. Possible values:
-
-
-‘none (0 ) ’
-Disabled
-
-‘simple (1 ) ’
-Enable only weighted refs
-
-‘smart (2 ) ’
-Enable both weighted refs and duplicates
-
-
-
-
-ssim (ssim )
-Enable calculation and printing SSIM stats after the encoding.
-
-
-intra-refresh (intra-refresh )
-Enable the use of Periodic Intra Refresh instead of IDR frames when set
-to 1.
-
-
-avcintra-class (class )
-Configure the encoder to generate AVC-Intra.
-Valid values are 50, 100 and 200.
-
-
-bluray-compat (bluray-compat )
-Configure the encoder to be compatible with the Blu-ray standard.
-It is a shorthand for setting "bluray-compat=1 force-cfr=1".
-
-
-b-bias (b-bias )
-Set the influence on how often B-frames are used.
-
-
-b-pyramid (b-pyramid )
-Set method for keeping of some B-frames as references. Possible values:
-
-
-‘none (none ) ’
-Disabled.
-
-‘strict (strict ) ’
-Strictly hierarchical pyramid.
-
-‘normal (normal ) ’
-Non-strict (not Blu-ray compatible).
-
-
-
-
-mixed-refs
-Enable the use of one reference per partition, as opposed to one
-reference per macroblock when set to 1. When set to 0, it has the
-same effect as x264
’s --no-mixed-refs option.
-
-
-8x8dct
-Enable adaptive spatial transform (high profile 8x8 transform)
-when set to 1. When set to 0, it has the same effect as
-x264
’s --no-8x8dct option.
-
-
-fast-pskip
-Enable early SKIP detection on P-frames when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-fast-pskip option.
-
-
-aud (aud )
-Enable use of access unit delimiters when set to 1.
-
-
-mbtree
-Enable use of macroblock tree rate control when set to 1. When set
-to 0, it has the same effect as x264
’s
---no-mbtree option.
-
-
-deblock (deblock )
-Set loop filter parameters, in alpha :beta form.
-
-
-cplxblur (cplxblur )
-Set fluctuations reduction in QP (before curve compression).
-
-
-partitions (partitions )
-Set partitions to consider, as a comma-separated list. Possible
-values in the list:
-
-
-‘p8x8 ’
-8x8 P-frame partition.
-
-‘p4x4 ’
-4x4 P-frame partition.
-
-‘b8x8 ’
-8x8 B-frame partition.
-
-‘i8x8 ’
-8x8 I-frame partition.
-
-‘i4x4 ’
-4x4 I-frame partition.
-(Enabling ‘p4x4 ’ requires ‘p8x8 ’ to be enabled. Enabling
-‘i8x8 ’ requires adaptive spatial transform (8x8dct
-option) to be enabled.)
-
-‘none (none ) ’
-Do not consider any partitions.
-
-‘all (all ) ’
-Consider every partition.
-
-
-
-
-direct-pred (direct )
-Set direct MV prediction mode. Possible values:
-
-
-‘none (none ) ’
-Disable MV prediction.
-
-‘spatial (spatial ) ’
-Enable spatial predicting.
-
-‘temporal (temporal ) ’
-Enable temporal predicting.
-
-‘auto (auto ) ’
-Automatically decided.
-
-
-
-
-slice-max-size (slice-max-size )
-Set the limit of the size of each slice in bytes. If not specified
-but RTP payload size (ps ) is specified, that is used.
-
-
-stats (stats )
-Set the file name for multi-pass stats.
-
-
-nal-hrd (nal-hrd )
-Set signal HRD information (requires vbv-bufsize to be set).
-Possible values:
-
-
-‘none (none ) ’
-Disable HRD information signaling.
-
-‘vbr (vbr ) ’
-Variable bit rate.
-
-‘cbr (cbr ) ’
-Constant bit rate (not allowed in MP4 container).
-
-
-
-
-x264opts (N.A.)
-Set any x264 option, see x264 --fullhelp
for a list.
-
-Argument is a list of key =value couples separated by
-":". In filter and psy-rd options that use ":" as a separator
-themselves, use "," instead. They accept it as well since long ago but this
-is kept undocumented for some reason.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
-
-
-
-x264-params (N.A.)
-Override the x264 configuration using a :-separated list of key=value
-parameters.
-
-This option is functionally the same as the x264opts , but is
-duplicated for compatibility with the Libav fork.
-
-For example to specify libx264 encoding options with ffmpeg
:
-
-
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
-cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
-no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
-
-
-
-
-
Encoding ffpresets for common usages are provided so they can be used with the
-general presets system (e.g. passing the pre option).
-
-
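-For example, a typical constant-quality encode with a preset (file names are
-placeholders):
-
-
ffmpeg -i input.mov -c:v libx264 -preset slow -crf 22 -c:a copy output.mkv
-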
-
9.5 libx265# TOC
-
-
x265 H.265/HEVC encoder wrapper.
-
-
This encoder requires the presence of the libx265 headers and library
-during configuration. You need to explicitly configure the build with
---enable-libx265 .
-
-
-
9.5.1 Options# TOC
-
-
-preset
-Set the x265 preset.
-
-
-tune
-Set the x265 tune parameter.
-
-
-x265-params
-Set x265 options using a list of key =value couples separated
-by ":". See x265 --help
for a list of options.
-
-For example to specify libx265 encoding options with -x265-params :
-
-
-
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
-
-
-
-
-
-
9.6 libxvid# TOC
-
-
Xvid MPEG-4 Part 2 encoder wrapper.
-
-
This encoder requires the presence of the libxvidcore headers and library
-during configuration. You need to explicitly configure the build with
---enable-libxvid --enable-gpl
.
-
-
The native mpeg4
encoder supports the MPEG-4 Part 2 format, so
-users can encode to this format without this library.
-
-
-
9.6.1 Options# TOC
-
-
The following options are supported by the libxvid wrapper. Some of
-the following options are listed but are not documented, and
-correspond to shared codec options. See the Codec
-Options chapter for their documentation. The other shared options
-which are not listed have no effect for the libxvid encoder.
-
-
-b
-g
-qmin
-qmax
-mpeg_quant
-threads
-bf
-b_qfactor
-b_qoffset
-flags
-Set specific encoding flags. Possible values:
-
-
-‘mv4 ’
-Use four motion vectors per macroblock.
-
-
-‘aic ’
-Enable high quality AC prediction.
-
-
-‘gray ’
-Only encode grayscale.
-
-
-‘gmc ’
-Enable the use of global motion compensation (GMC).
-
-
-‘qpel ’
-Enable quarter-pixel motion compensation.
-
-
-‘cgop ’
-Enable closed GOP.
-
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-
-
-
-
-trellis
-me_method
-Set motion estimation method. Possible values in decreasing order of
-speed and increasing order of quality:
-
-
-‘zero ’
-Use no motion estimation (default).
-
-
-‘phods ’
-‘x1 ’
-‘log ’
-Enable advanced diamond zonal search for 16x16 blocks and half-pixel
-refinement for 16x16 blocks. ‘x1 ’ and ‘log ’ are aliases for
-‘phods ’.
-
-
-‘epzs ’
-Enable all of the things described above, plus advanced diamond zonal
-search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
-estimation on chroma planes.
-
-
-‘full ’
-Enable all of the things described above, plus extended 16x16 and 8x8
-blocks search.
-
-
-
-
-mbd
-Set macroblock decision algorithm. Possible values in the increasing
-order of quality:
-
-
-‘simple ’
-Use macroblock comparing function algorithm (default).
-
-
-‘bits ’
-Enable rate distortion-based half pixel and quarter pixel refinement for
-16x16 blocks.
-
-
-‘rd ’
-Enable all of the things described above, plus rate distortion-based
-half pixel and quarter pixel refinement for 8x8 blocks, and rate
-distortion-based search using square pattern.
-
-
-
-
-lumi_aq
-Enable lumi masking adaptive quantization when set to 1. Default is 0
-(disabled).
-
-
-variance_aq
-Enable variance adaptive quantization when set to 1. Default is 0
-(disabled).
-
-When combined with lumi_aq , the resulting quality will not
-be better than either of the two used individually. In other
-words, the resulting quality will be the worse one of the two
-effects.
-
-
-ssim
-Set structural similarity (SSIM) displaying method. Possible values:
-
-
-‘off ’
-Disable displaying of SSIM information.
-
-
-‘avg ’
-Output average SSIM at the end of encoding to stdout. The format of
-showing the average SSIM is:
-
-
-
-For users who are not familiar with C, %f means a float number, or
-a decimal (e.g. 0.939232).
-
-
-‘frame ’
-Output both per-frame SSIM data during encoding and average SSIM at
-the end of encoding to stdout. The format of per-frame information
-is:
-
-
-
SSIM: avg: %1.3f min: %1.3f max: %1.3f
-
-
-For users who are not familiar with C, %1.3f means a float number
-rounded to 3 digits after the dot (e.g. 0.932).
-
-
-
-
-
-ssim_acc
-Set SSIM accuracy. Valid options are integers within the range of
-0-4, while 0 gives the most accurate result and 4 computes the
-fastest.
-
-
-
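-For example, enabling four motion vectors and AC prediction at a fixed bitrate
-(file names are placeholders):
-
-
ffmpeg -i input.mov -c:v libxvid -b:v 1200k -flags +mv4+aic output.avi
-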
-
-
-
9.7 mpeg2# TOC
-
-
MPEG-2 video encoder.
-
-
-
9.7.1 Options# TOC
-
-
-seq_disp_ext integer
-Specifies if the encoder should write a sequence_display_extension to the
-output.
-
--1
-auto
-Decide automatically to write it or not (this is the default) by checking if
-the data to be written is different from the default or unspecified values.
-
-0
-never
-Never write it.
-
-1
-always
-Always write it.
-
-
-
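-For example, selecting this encoder (named mpeg2video on the command line) and
-forcing the extension to be written (file names are placeholders):
-
-
ffmpeg -i input.mov -c:v mpeg2video -b:v 5M -seq_disp_ext always output.mpg
-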
-
-
-
-
9.8 png# TOC
-
PNG image encoder.
-
-
-
9.8.1 Private options# TOC
-
-
-dpi integer
-Set physical density of pixels, in dots per inch, unset by default
-
-dpm integer
-Set physical density of pixels, in dots per meter, unset by default
-
-
-
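-For example, to tag the output image with a density of 300 dots per inch
-(file names are placeholders):
-
-
ffmpeg -i input.bmp -c:v png -dpi 300 output.png
-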
-
-
9.9 ProRes# TOC
-
-
Apple ProRes encoder.
-
-
FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
-The used encoder can be chosen with the -vcodec
option.
-
-
-
9.9.1 Private Options for prores-ks# TOC
-
-
-profile integer
-Select the ProRes profile to encode
-
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-‘4444 ’
-
-
-
-quant_mat integer
-Select quantization matrix.
-
-‘auto ’
-‘default ’
-‘proxy ’
-‘lt ’
-‘standard ’
-‘hq ’
-
-If set to auto , the matrix matching the profile will be picked.
-If not set, the matrix providing the highest quality, default , will be
-picked.
-
-
-bits_per_mb integer
-How many bits to allot for coding one macroblock. Different profiles use
-between 200 and 2400 bits per macroblock; the maximum is 8000.
-
-
-mbs_per_slice integer
-Number of macroblocks in each slice (1-8); the default value (8)
-should be good in almost all situations.
-
-
-vendor string
-Override the 4-byte vendor ID.
-A custom vendor ID like apl0 would claim the stream was produced by
-the Apple encoder.
-
-
-alpha_bits integer
-Specify number of bits for alpha component.
-Possible values are 0 , 8 and 16 .
-Use 0 to disable alpha plane coding.
-
-
-
-
-
-
9.9.2 Speed considerations# TOC
-
-
In the default mode of operation the encoder has to honor frame constraints
-(i.e. not produce frames with a size bigger than requested) while still making
-the output picture as good as possible.
-A frame containing a lot of small details is harder to compress and the encoder
-would spend more time searching for appropriate quantizers for each slice.
-
-
Setting a higher bits_per_mb limit will improve the speed.
-
-
For the fastest encoding speed set the qscale parameter (4 is the
-recommended value) and do not set a size constraint.
-
-
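-For example, to encode with the prores-ks encoder using the HQ profile (file
-names are placeholders):
-
-
ffmpeg -i input.mov -c:v prores_ks -profile:v hq -c:a copy output.mov
-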
-
-
10 Subtitles Encoders# TOC
-
-
-
10.1 dvdsub# TOC
-
-
This codec encodes the bitmap subtitle format that is used in DVDs.
-Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
-and they can also be used in Matroska files.
-
-
-
10.1.1 Options# TOC
-
-
-even_rows_fix
-When set to 1, enable a work-around that makes the number of pixel rows
-even in all subtitles. This fixes a problem with some players that
-cut off the bottom row if the number is odd. The work-around just adds
-a fully transparent row if needed. The overhead is low, typically
-one byte per subtitle on average.
-
-By default, this work-around is disabled.
-
-
-
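-For example, assuming the input already carries bitmap subtitles that can be
-re-encoded, the work-around can be enabled like this (file names are
-placeholders):
-
-
ffmpeg -i input.mkv -map 0 -c copy -c:s dvdsub -even_rows_fix 1 output.mkv
-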
-
-
-
11 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavcodec
-
-
-
-
12 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-devices.html b/Externals/ffmpeg/shared/doc/ffmpeg-devices.html
deleted file mode 100644
index a460bd1369..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-devices.html
+++ /dev/null
@@ -1,1810 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- FFmpeg Devices Documentation
-
-
-
-
-
-
-
-
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the input and output devices provided by the
-libavdevice library.
-
-
-
-
2 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific to that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
-
-
3 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow access to
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "–list-indevs".
-
-
You can disable all the input devices using the configure option
-"–disable-indevs", and selectively enable an input device using the
-option "–enable-indev=INDEV ", or you can disable a particular
-input device using the option "–disable-indev=INDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
3.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD [,DEV [,SUBDEV ]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg
from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
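To capture from a specific device and subdevice on that card, the full
-hw:CARD,DEV syntax described above can be used (the numbers below are
-placeholders for your system):
-
ffmpeg -f alsa -i hw:0,0 alsaout.wav
-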
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
3.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the framework currently recommended by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the second selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-
- -video_device_index <INDEX>
-
-and/or
-
- -audio_device_index <INDEX>
-
-, overriding any
-device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
3.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
3.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
3.3 bktr# TOC
-
-
BSD video input device.
-
-
-
3.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronization between them.
-
-
The input name should be in the format:
-
-
TYPE=NAME[:TYPE=NAME]
-
where TYPE can be either audio or video ,
-and NAME is the device’s name.
-
-
-
3.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
3.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
-
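Request a specific video size and frame rate from the device (a sketch; the
-device must actually support the requested values, and "Camera" is a
-placeholder name):
-
$ ffmpeg -f dshow -video_size 640x480 -framerate 30 -i video="Camera" out.avi
-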
-
-
-
3.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
3.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
3.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
desktop
-
or
-
title=window_title
-
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
3.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0
to
-not draw the pointer. Default value is 1
.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor (see the example after this list).
-
-
-
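For example, to grab a 640x480 region from a monitor located above and to the
-left of the primary monitor (the offsets below are placeholders; they depend
-on your actual monitor layout):
-
ffmpeg -f gdigrab -framerate 6 -offset_x -640 -offset_y -480 -video_size 640x480 -i desktop out.mpg
-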
-
-
-
3.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
3.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
3.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
-
-
-
-
-
-
3.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg
.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
3.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
3.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
3.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
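The graph_file option can be used in the same way; a sketch assuming a local
-file graph.txt that contains a graph description such as "color=c=pink [out0]":
-
ffplay -f lavfi -graph_file graph.txt dummy
-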
-
-
-
3.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
3.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
3.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
3.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
3.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string '' as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
3.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg
use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
3.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default"
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
3.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the sample rate in Hz, by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
3.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
-
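To record from a specific source instead, pass one of the source names printed
-by pactl list sources (the source name below is a placeholder; yours will
-differ):
-
ffmpeg -f pulse -i alsa_input.pci-0000_00_1b.0.analog-stereo /tmp/pulse.wav
-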
-
-
3.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
3.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg
use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
3.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node. Linux systems usually
-create such nodes automatically when the device (e.g. a USB webcam) is
-plugged into the system; the node has a name of the kind
-/dev/videoN , where N is a number associated with
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-width xheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
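List the formats and standards supported by a device before grabbing (a
-sketch assuming the device node /dev/video0):
-
ffmpeg -f video4linux2 -list_formats all -i /dev/video0
-ffmpeg -f video4linux2 -list_standards all -i /dev/video0
-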
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
3.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Defaults to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
3.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
-
3.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname :display_number .screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo
program for getting basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
3.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0
specifies
-not to draw the pointer. Default value is 1
.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered
or a number of pixels PIXELS .
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of region; otherwise, the region
-follows only when the mouse pointer reaches within PIXELS (greater than
-zero) of the edge of the region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer reaches within 100 pixels to edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga
.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1
.
-It may be necessary to disable it for remote displays (see the example after this list).
-
-
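For example, to grab from a remote display with the shared-memory extension
-disabled (the hostname below is a placeholder):
-
ffmpeg -f x11grab -use_shm 0 -framerate 25 -video_size cif -i remotehost:0.0+10,20 out.mpg
-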
-
-
-
3.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz and the number
-of channels currently is limited to 2 (stereo).
-
-
-
3.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
3.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
4 Output Devices# TOC
-
-
Output devices are configured elements in FFmpeg that can write
-multimedia data to an output device attached to your system.
-
-
When you configure your FFmpeg build, all the supported output devices
-are enabled by default. You can list all available ones using the
-configure option "–list-outdevs".
-
-
You can disable all the output devices using the configure option
-"–disable-outdevs", and selectively enable an output device using the
-option "–enable-outdev=OUTDEV ", or you can disable a particular
-input device using the option "–disable-outdev=OUTDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-enabled output devices.
-
-
A description of the currently available output devices follows.
-
-
-
4.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) output device.
-
-
-
4.1.1 Examples# TOC
-
-
- Play a file on default ALSA device:
-
-
ffmpeg -i INPUT -f alsa default
-
-
- Play a file on soundcard 1, audio device 7:
-
-
ffmpeg -i INPUT -f alsa hw:1,7
-
-
-
-
-
4.2 caca# TOC
-
-
CACA output device.
-
-
This output device allows one to show a video stream in a CACA window.
-Only one CACA window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need to configure FFmpeg with
---enable-libcaca
.
-libcaca is a graphics library that outputs text instead of pixels.
-
-
For more information about libcaca, check:
-http://caca.zoy.org/wiki/libcaca
-
-
-
4.2.1 Options# TOC
-
-
-window_title
-Set the CACA window title, if not specified default to the filename
-specified for the output device.
-
-
-window_size
-Set the CACA window size, can be a string of the form
-width xheight or a video size abbreviation.
-If not specified it defaults to the size of the input video.
-
-
-driver
-Set display driver.
-
-
-algorithm
-Set dithering algorithm. Dithering is necessary
-because the picture being rendered usually has far more colours than
-the available palette.
-The accepted values are listed with -list_dither algorithms
.
-
-
-antialias
-Set antialias method. Antialiasing smoothens the rendered
-image and avoids the commonly seen staircase effect.
-The accepted values are listed with -list_dither antialiases
.
-
-
-charset
-Set which characters are going to be used when rendering text.
-The accepted values are listed with -list_dither charsets
.
-
-
-color
-Set color to be used when rendering text.
-The accepted values are listed with -list_dither colors
.
-
-
-list_drivers
-If set to true , print a list of available drivers and exit.
-
-
-list_dither
-List available dither options related to the argument.
-The argument must be one of algorithms
, antialiases
,
-charsets
, colors
.
-
-
-
-
-
4.2.2 Examples# TOC
-
-
- The following command shows the ffmpeg
output in a
-CACA window, forcing its size to 80x25:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
-
-
- Show the list of available drivers and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
-
-
- Show the list of available dither colors and exit:
-
-
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
-
-
-
-
-
4.3 decklink# TOC
-
-
The decklink output device provides playback capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this output device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz.
-
-
-
4.3.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-preroll
-Amount of time to preroll video in seconds.
-Defaults to 0.5 .
-
-
-
-
-
-
4.3.2 Examples# TOC
-
-
- List output devices:
-
-
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
-
-
- List supported formats:
-
-
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
-
-
- Play video clip:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
-
-
- Play video clip with non-standard framerate or video size:
-
-
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
-
-
-
-
-
-
4.4 fbdev# TOC
-
-
Linux framebuffer output device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
-
4.4.1 Options# TOC
-
-xoffset
-yoffset
-Set x/y coordinate of top left corner. Default is 0.
-
-
-
-
-
4.4.2 Examples# TOC
-
Play a file on framebuffer device /dev/fb0 .
-Required pixel format depends on current framebuffer settings.
-
-
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
-
-
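To place the video at an offset inside the framebuffer, the xoffset and
-yoffset options described above can be added (the values below are
-illustrative):
-
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -xoffset 100 -yoffset 50 -f fbdev /dev/fb0
-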
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
4.5 opengl# TOC
-
OpenGL output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-opengl
.
-
-
This output device allows one to render to an OpenGL context.
-The context may be provided by the application, otherwise a default SDL window is created.
-
-
When the device renders to an external context, the application must implement handlers for the following messages:
-AV_DEV_TO_APP_CREATE_WINDOW_BUFFER
- create OpenGL context on current thread.
-AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER
- make OpenGL context current.
-AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER
- swap buffers.
-AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER
- destroy OpenGL context.
-The application is also required to inform the device about the current resolution by sending an AV_APP_TO_DEV_WINDOW_SIZE
message.
-
-
-
4.5.1 Options# TOC
-
-background
-Set background color. Black is the default.
-
-no_window
-Disables the default SDL window when set to a non-zero value.
-Application must provide OpenGL context and both window_size_cb
and window_swap_buffers_cb
callbacks when set.
-
-window_title
-Set the SDL window title, if not specified default to the filename specified for the output device.
-Ignored when no_window is set.
-
-window_size
-Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
-If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
-Mostly usable when no_window is not set.
-
-
-
-
-
-
4.5.2 Examples# TOC
-
Play a file on SDL window using OpenGL rendering:
-
-
ffmpeg -i INPUT -f opengl "window title"
-
-
-
-
-
4.6 oss# TOC
-
OSS (Open Sound System) output device.
-
-
-
4.7 pulse# TOC
-
-
PulseAudio output device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org
-
-
-
4.7.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is set to the specified output name.
-
-
-device
-Specify the device to use. Default device is used when not provided.
-List of output devices can be obtained with command pactl list sinks
.
-
-
-buffer_size
-buffer_duration
-Control the size and duration of the PulseAudio buffer. A small buffer
-gives more control, but requires more frequent updates.
-
-buffer_size specifies size in bytes while
-buffer_duration specifies duration in milliseconds.
-
-When both options are provided then the highest value is used
-(duration is recalculated to bytes using stream parameters). If they
-are set to 0 (which is default), the device will use the default
-PulseAudio duration value. By default PulseAudio sets the buffer duration
-to around 2 seconds.
-
-
-prebuf
-Specify pre-buffering size in bytes. The server does not start with
-playback before at least prebuf bytes are available in the
-buffer. By default this option is initialized to the same value as
-buffer_size or buffer_duration (whichever is bigger).
-
-
-minreq
-Specify minimum request size in bytes. The server does not request less
-than minreq bytes from the client; instead it waits until the buffer
-is free enough to request more bytes at once. It is recommended not to set
-this option, in which case it is initialized to a value that is deemed sensible
-by the server.
-
-
-
-
-
-
4.7.2 Examples# TOC
-
Play a file on default device on default server:
-
-
ffmpeg -i INPUT -f pulse "stream name"
-
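Play a file on a specific sink with a shorter buffer (the sink name below is
-a placeholder; obtain real names with pactl list sinks):
-
ffmpeg -i INPUT -f pulse -device alsa_output.pci-0000_00_1b.0.analog-stereo -buffer_duration 250 "stream name"
-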
-
-
-
-
4.8 sdl# TOC
-
SDL (Simple DirectMedia Layer) output device.
-
-
This output device allows one to show a video stream in an SDL
-window. Only one SDL window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-
To enable this output device you need libsdl installed on your system
-when configuring your build.
-
-
For more information about SDL, check:
-http://www.libsdl.org/
-
-
-
4.8.1 Options# TOC
-
-
-window_title
-Set the SDL window title, if not specified default to the filename
-specified for the output device.
-
-
-icon_title
-Set the name of the iconified SDL window, if not specified it is set
-to the same value of window_title .
-
-
-window_size
-Set the SDL window size, can be a string of the form
-width xheight or a video size abbreviation.
-If not specified it defaults to the size of the input video,
-downscaled according to the aspect ratio.
-
-
-window_fullscreen
-Set fullscreen mode when non-zero value is provided.
-Default value is zero.
-
-
-
-
-
4.8.2 Interactive commands# TOC
-
-
The window created by the device can be controlled through the
-following interactive commands.
-
-
-q, ESC
-Quit the device immediately.
-
-
-
-
-
4.8.3 Examples# TOC
-
-
The following command shows the ffmpeg
output in an
-SDL window, forcing its size to the qcif format:
-
-
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
-
-
-
-
4.9 sndio# TOC
-
-
sndio audio output device.
-
-
-
-
4.10 xv# TOC
-
XV (XVideo) output device.
-
-
This output device allows one to show a video stream in an X Window System
-window.
-
-
-
4.10.1 Options# TOC
-
-
-display_name
-Specify the hardware display name, which determines the display and
-communications domain to be used.
-
-The display name or DISPLAY environment variable can be a string in
-the format hostname [:number [.screen_number ]].
-
-hostname specifies the name of the host machine on which the
-display is physically attached. number specifies the number of
-the display server on that host machine. screen_number specifies
-the screen to be used on that server.
-
-If unspecified, it defaults to the value of the DISPLAY environment
-variable.
-
-For example, dual-headed:0.1
would specify screen 1 of display
-0 on the machine named “dual-headed”.
-
-Check the X11 specification for more detailed information about the
-display name format.
-
-
-window_id
-When set to a non-zero value, the device does not create a new window,
-but uses the existing one with the provided window_id . By default
-this option is set to zero and the device creates its own window.
-
-
-window_size
-Set the created window size, can be a string of the form
-width xheight or a video size abbreviation. If not
-specified it defaults to the size of the input video.
-Ignored when window_id is set.
-
-
-window_x
-window_y
-Set the X and Y window offsets for the created window. They are both
-set to 0 by default. The values may be ignored by the window manager.
-Ignored when window_id is set.
-
-
-window_title
-Set the window title, if not specified default to the filename
-specified for the output device. Ignored when window_id is set.
-
-
-
-
For more information about XVideo see http://www.x.org/ .
-
-
-
4.10.2 Examples# TOC
-
-
- Decode, display and encode video input with ffmpeg
at the
-same time:
-
-
ffmpeg -i INPUT OUTPUT -f xv display
-
-
- Decode and display the input video to multiple X11 windows:
-
-
ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
-
-
-
-
-
-
5 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavdevice
-
-
-
-
6 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-filters.html b/Externals/ffmpeg/shared/doc/ffmpeg-filters.html
deleted file mode 100644
index b0373dc55a..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-filters.html
+++ /dev/null
@@ -1,13382 +0,0 @@
-
-
-
-
-
-
- FFmpeg Filters Documentation
-
-
-
-
-
-
-
-
- FFmpeg Filters Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes filters, sources, and sinks provided by the
-libavfilter library.
-
-
-
-
2 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream into two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half of the video, which is then vertically flipped. The
-overlay filter takes as input the first, unchanged output of the
-split filter (which was labelled as [main] ), and overlays on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take a list of parameters as input: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
3 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
-
graph2dot -h
-
to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
You may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
4 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
4.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments ".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value . In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade
filter
-declares three options in this order – type , start_frame and
-nb_frames . Then the parameter list in:0:30 means that the value
-in is assigned to the option type , 0 to
-start_frame and 30 to nb_frames .
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct value must precede the key=value pairs, and
-follow the same order constraints as in the previous point. The following
-key=value pairs can be set in any preferred order (see the example after this list).
-
-
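A short sketch of the three argument forms, using the fade filter mentioned
-above (the values are illustrative):
-
fade=in:0:30
-fade=type=in:start_frame=0:nb_frames=30
-fade=in:0:nb_frames=30
-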
-
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
4.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to
-separate values, or one of the escaping characters \'
.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \'
or the special
-characters [],;
used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the '
special escaping character, and the
-:
special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \'
escaping special characters,
-also ,
needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\
is special and needs to be escaped with another \
, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
5 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
6 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
6.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channels are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If the number of given delays is
-smaller than the number of channels, all remaining channels will not be delayed.
-
-
-
-
-
6.1.1 Examples# TOC
-
-
- Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
adelay=1500|0|500
-
-
-
6.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
6.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
aecho=0.8:0.88:60:0.4
-
- If the delay is very short, then it sounds like a (metallic) robot playing music:
-
aecho=0.8:0.88:6:0.4
-
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
6.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
6.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
aeval=val(0)|-val(1)
-
-
-
6.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in
for fade-in, or
-out
for a fade-out effect. Default is in
.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
6.4.1 Examples# TOC
-
-
- Fade in first 15 seconds of audio:
-
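For instance, using the duration option (an illustrative sketch):
afade=t=in:ss=0:d=15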
-
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
6.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
6.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
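As an illustrative sketch (values chosen arbitrarily), a 700 Hz all-pass with a Q of 0.707 can be expressed as:
allpass=f=700:width_type=q:w=0.707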
-
-
6.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
-On the other hand, if both inputs are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate, and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
6.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
6.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
6.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
6.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
-This can be used together with ffmpeg -shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
6.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
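For example (illustrative):
apad=pad_len=1024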
-
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
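For example (illustrative):
apad=whole_len=10000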
-
- Use ffmpeg to pad the audio input with silence, so that the
-video stream will always be the shortest and will be converted
-until the end in the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
6.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
-A phaser filter creates a series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs is modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
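As an illustrative sketch (values chosen arbitrarily), a faster triangular sweep could be requested with:
aphaser=type=t:speed=2:decay=0.6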
-
-
6.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
6.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
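For example (illustrative):
aresample=44100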
-
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
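For example, using the libswresample async option (illustrative):
aresample=async=1000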
-
-
-
-
6.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel .
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
6.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
-
-
-
6.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
-
6.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10].
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
-
-
6.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
6.17.1 Examples# TOC
-
-
Stress-test amerge
by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
6.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
6.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
6.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
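For example (illustrative):
atempo=0.8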
-
- To speed up audio to 125% tempo:
-
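For example (illustrative):
atempo=1.25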
-
-
-
-
6.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
6.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
6.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
6.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
-
6.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
-
-
6.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
-
-
-
6.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel -out_channel or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
6.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
6.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|....
or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0
is assumed but
-may be overridden (by 0/out-dBn
). Typical values for the transfer
-function are -70/-70|-60/-20
.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
6.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
6.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
6.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
6.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
6.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added sweep delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
-
-
-
6.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
-
-
6.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
6.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
-/usr/lib/ladspa/ .
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the
-filter will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help
, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
6.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
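For example (illustrative, assuming the amp library is installed):
ladspa=file=amp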
-
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF
library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome
from the
-C* Audio Plugin Suite
(CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20'
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
6.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and prior one is kept.
-
-
-
-
-
6.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
-
-
6.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
6.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
Note that ffmpeg
integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
6.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- only one input per channel output,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
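For example (illustrative), by multiplying the first output channel by zero:
pan="stereo| c0=0*c0 | c1=c1"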
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
6.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At end of filtering it displays track_gain
and track_peak
.
-
-
-
6.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
6.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-or equal to a noise tolerance value for a duration greater or equal to the
-minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
6.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg
to detect silence with 0.0001 noise
-tolerance in silence.mp3 :
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
6.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from beginning of audio
-the start_periods will be 1
but it can be increased to higher
-values to trim all audio up to specific count of non-silence periods.
-Default value is 0
.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0
.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0
may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0
.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0
.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0
.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0
.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-For example, if you want to remove long pauses between words but do not want
-to remove the pauses completely. Default value is 0
.
-
-
-
-
-
-
6.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
-
-
6.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
-
6.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
6.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
6.42.2 Examples# TOC
-
-
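As an illustrative sketch, the input volume can be halved either as a ratio or (approximately) in dB:
volume=volume=0.5
volume=volume=-6.0206dB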
-
-
-
6.43 volumedetect# TOC
-
-
-Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the input stream end is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
6.43.1 Examples# TOC
-
-
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
7 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
7.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
7.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
7.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
7.2.1 Examples# TOC
-
-
- Generate silence:
-
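For example, an expression that is constantly zero (illustrative):
aevalsrc=0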
-
- Generate a sin signal with frequency of 440 Hz, set sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
-
-
-
-
7.3 anullsrc# TOC
-
-
-The null audio source returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
7.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
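For example (illustrative; 0x4 is assumed to be the integer value of AV_CH_LAYOUT_MONO):
anullsrc=r=48000:cl=4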
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
-
-
7.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal
. See also the list_voices option.
-
-
-
-
-
7.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt
voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite
and
-the lavfi
device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
7.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
7.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
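For example, spelling the default carrier frequency out explicitly (illustrative):
sine=frequency=440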
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
8 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
8.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
8.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
9 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
9.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
9.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
-
9.3 ass# TOC
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto
.
-
-
-
-
-
9.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16
.
-
-
-
-
-
9.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
9.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98
.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32
.
-
-
-
-
-
-
9.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
-The blend filter takes two input streams and outputs one
-stream, the first input is the "top" layer and the second input is the
-"bottom" layer. Output terminates when the shortest input terminates.
-
-
-The tblend (time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode . Default value is normal.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and
-0.5,0.5 for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
9.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
9.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2
for the
-luma and alpha planes, and of min(cw,ch)/2
for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
9.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
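One way to express this with the option names documented above:

boxblur=luma_radius=2:luma_power=1:chroma_radius=0:alpha_radius=0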
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
9.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
9.9.1 Examples# TOC
-
-
- Visualize multi-directional MVs from P-frames and B-frames using ffplay
:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
9.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
9.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
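One illustrative way to do this with the rs option described above (the amount is arbitrary):

colorbalance=rs=.3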
-
-
-
-
9.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
9.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
9.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
red = red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
9.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
9.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
9.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
9.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1, force the output display aspect ratio
-to be the same as that of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
9.15.1 Examples# TOC
-
-
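Some illustrative commands built from the parameters and constants listed above: crop the central half of the input, then crop a 100x100 area with its top-left corner at (12,34) (all values are arbitrary):

crop=w=iw/2:h=ih/2
crop=100:100:12:34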
-
-
-
9.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
-
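A typical way to use it is to scan the input and read the suggested crop parameters from the log; the null muxer here is just one convenient way to discard the output (the file name is a placeholder):

ffmpeg -i input.mkv -vf cropdetect=limit=24:round=2 -f null -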
-
-
9.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happened to go outside
-the vector spaces, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
To avoid some filtergraph syntax conflicts, each key points list need to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ...
.
-
-
-
9.17.1 Examples# TOC
-
-
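A couple of illustrative commands, using a listed preset and the key-point syntax described above (the key-point values are arbitrary):

curves=preset=vintage
curves=blue='0/0 0.5/0.58 1/1'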
-
-
-
9.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma
; every DCT
-coefficient (absolute value) below this threshold with be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
9.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5
:
-
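One way to write this, using the sigma option documented above:

dctdnoiz=sigma=4.5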
-
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
Violent denoise using a block size of 16x16
:
-
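For instance (the sigma value is arbitrary; n=4 selects the 16x16 blocksize described above):

dctdnoiz=sigma=15:n=4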
-
-
-
9.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
-
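For instance, dropping one frame out of every five (this simply writes out the documented default cycle explicitly):

decimate=cycle=5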
-
-
9.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
-Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content then the output of pullup,dejudder
-will have a variable frame rate. May change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
-
-
-
9.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appears - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
9.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
9.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area in which to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
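For instance, allowing a wider motion search and clamped edge fill (an illustrative combination of the options above):

deshake=rx=32:ry=32:edge=clamp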
-
-
9.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
9.23.1 Examples# TOC
-
-
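For instance, drawing a semi-transparent red box of 200x60 pixels at position (10,20) with a 5 pixel thick edge (all values are arbitrary):

drawbox=x=10:y=20:w=200:h=60:color=red@0.5:t=5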
-
-
-
9.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness
, so the image gets
-framed. They default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
9.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
9.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
9.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer to
-each other, so you can for example specify y=x/dar
.
-
-
-
-
-
9.25.2 Text expansion# TOC
-
-
If expansion is set to strftime
,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none
, the text is printed verbatim.
-
-
If expansion is set to normal
(which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequence of the form %{...}
are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms
stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
9.25.3 Examples# TOC
-
-
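A simple illustrative command that centres a string using the constants listed above (the font path is a placeholder):

drawtext=fontfile=/path/to/font.ttf:text='Hello':fontcolor=white:fontsize=24:x=(w-text_w)/2:y=(h-text_h)/2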
-
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
9.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be lesser or equal to high .
-
-Default value for low is 20/255
, and default value for high
-is 50/255
.
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
9.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
9.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select r, g, b planes
-with y, u, v planes at the same time.
-
-
-
-
-
9.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
9.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
-
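For example, to posterize to 16 distinct output colors using a few more iterations (values are illustrative):

elbg=codebook_length=16:nb_steps=10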
-
-
9.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in
.
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
9.29.1 Examples# TOC
-
-
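A couple of illustrative commands using the options above: fade in over the first 30 frames, and fade out to black starting at 25 seconds for 5 seconds:

fade=t=in:s=0:n=30
fade=t=out:st=25:d=5:c=black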
-
-
-
9.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0
or
-top
) or the bottom field (if the value is 1
or
-bottom
).
-
-
-
-
-
9.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch
needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch
will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch
can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on
-which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch
and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parenthesis at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0
could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1
.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 defaults to 0
.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0]
.
-
-Default value is 12.0
.
-
-
-combmatch
-When combmatch is not none , fieldmatch
will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1
(every pixel will be detected as combed) to 255
(no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12]
.
-
-Default value is 9
.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0
and maximum is blocky x blockx
(at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80
.
-
-
-
-
-
9.31.1 p/c/n/u/b meaning# TOC
-
-
-
9.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch
is configured to run a matching from bottom
-(field =bottom ) this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
9.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, a
-’x’ is placed above and below each matched fields.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
9.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
9.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
9.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
9.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
9.34.1 Examples# TOC
-
-
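For instance, to force yuv420p, or to offer a small set of acceptable formats (illustrative):

format=pix_fmts=yuv420p
format=pix_fmts=yuv420p|yuv444p|rgb24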
-
-
-
9.35 fps# TOC
-
-
-Convert the video to the specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
9.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
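One way to write it:

fps=fps=25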
-
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
9.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
9.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select frame after every step
frames.
-Allowed values are positive integers higher than 0. Default value is 1
.
-
-
-
-
-
9.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH
.
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive) or a color description as specified in the "Color"
-section in the ffmpeg-utils manual, a position (specified as X /Y , where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
9.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
9.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0
− PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames have often larger QP. Default is
-0
(not enabled).
-
-
-
-
-
-
9.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
-If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified it will evaluate to an opaque value.
-If none of the chrominance expressions are specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
9.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
geq=p(W-X\,Y)
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
9.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
9.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and radius of 8:
-
gradfun=3.5:8
-
- Specify radius, omitting the strength (which will fall back to the default
-value):
-
gradfun=radius=8
-
-
-
-
-
9.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0 disables the filter after the last frame of the CLUT is reached.
-Default is 1.
-
-
-
-
haldclut
also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
9.42.1 Workflow examples# TOC
-
-
-
9.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut
to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (duration of
-clut.nut), then the last picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot stream.
-
-
-
9.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a square image of Level*Level*Level by
-Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut
filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
9.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
9.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none, weak or
-strong. It defaults to none.
-
-
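-
As an illustrative sketch (not from the original documentation; the values are arbitrary but within the documented ranges), the filter could be used as:
-
histeq=strength=0.3:intensity=0.3:antibanding=weak
-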
-
-
-
9.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar to color, but the actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform
mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10
. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform
. Can be either row
, or column
.
-Default is row
.
-
-
-waveform_mirror
-Set mirroring mode for waveform
. 0
means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row
mode and at the top for column
mode. Default is
-0
(unmirrored).
-
-
-display_mode
-Set display mode for waveform
and levels
.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels
. Can be either linear
, or logarithmic
.
-Default is linear
.
-
-
-
-
-
9.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
9.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
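-
An illustrative invocation (not part of the original documentation; the strengths are arbitrary examples) could be:
-
hqdn3d=luma_spatial=8:chroma_spatial=6:luma_tmp=12:chroma_tmp=9
-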
-
-
9.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for hq2x, 3 for
-hq3x and 4 for hq4x.
-Default is 3.
-
-
-
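-
For instance, a hypothetical use (not from the original documentation) that magnifies pixel art by a factor of 4 would be:
-
hqx=n=4
-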
-
-
9.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
9.48.1 Examples# TOC
-
-
-
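-
As an illustrative sketch (not from the original documentation; the values are arbitrary), rotating the hue by 90 degrees while slightly boosting the saturation could look like:
-
hue=h=90:s=1.5
-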
-
-
9.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
9.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect if the input frames are interlaced, progressive,
-top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-whether the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
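-
An illustrative way to run the detector and discard the output (the file name input.mkv is a placeholder, and this command is not part of the original documentation) could be:
-
ffmpeg -i input.mkv -an -vf idet -frames:v 500 -f null -
-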
-
-
-
-
9.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced images fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none
.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
-
-
-
-
-
9.51 interlace# TOC
-
-
Simple interlacing filter from progressive contents. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
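-
A hypothetical invocation (not from the original documentation) taking the interlaced frame from the even lines and keeping the lowpass filter enabled could be:
-
interlace=scan=tff:lowpass=1
-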
-
-
-
9.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
9.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
kerndeint=sharp=1
-
- Paint processed pixels in white:
-
kerndeint=map=1
-
-
-
-
9.53 lenscorrection# TOC
-
-
Correct radial lens distortion
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
9.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
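-
As an illustrative sketch (not part of the original documentation; the coefficients are arbitrary), a mild correction centered on the image could be expressed as:
-
lenscorrection=cx=0.5:cy=0.5:k1=0.45:k2=0.5
-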
-
-
9.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
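-
An illustrative invocation (grade.cube is a placeholder file name; this example is not from the original documentation) could be:
-
lut3d=file=grade.cube:interp=tetrahedral
-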
-
-
-
9.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
9.55.1 Examples# TOC
-
-
-
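-
As an illustrative sketch (not from the original documentation), a YUV input could be negated by using the negval constant for each component:
-
lutyuv=y=negval:u=negval:v=negval
-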
-
-
9.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merge selected input
-planes to the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0
.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mappings are
-similar: ’Bb’ describes the mapping for the second plane of the output
-stream, ’Cc’ describes the mapping for the third plane and
-’Dd’ describes the mapping for the fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p
.
-
-
-
-
-
9.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
9.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
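-
An illustrative chain (not from the original documentation) that first produces one field per frame with yadif=1, as required above, and then applies motion-compensated deinterlacing could be:
-
yadif=1,mcdeint=mode=medium:parity=tff:qp=2
-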
-
-
-
-
9.58 mp# TOC
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-as for the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
9.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
mp=eq2=1.0:2:0.5
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
9.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previously dropped consecutive frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
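-
As an illustrative sketch (not part of the original documentation), the documented defaults written out explicitly look like:
-
mpdecimate=hi=768:lo=320:frac=0.33
-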
-
-
-
9.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer in input; if non-zero it negates the
-alpha component (if available). The default value in input is 0.
-
-
-
9.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-pix_fmts=yuv420p|monow|rgb24.
-
-
-
-
-
-
9.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
9.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457
.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for specific pixel component or all pixel components in case
-all_strength . Default value is 0
. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags or set flags for all components if all_flags .
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
9.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
9.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
9.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
9.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate
.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file are assumed instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
9.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode
.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
9.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth
.
-
-
-
9.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them begin in the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together more overlays, but you should test the
-efficiency of such an approach.
-
-
-
9.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
9.65.2 Examples# TOC
-
-
-
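-
An illustrative command (main.mp4 and logo.png are placeholder file names; this example is not from the original documentation) that places the second input 10 pixels away from the bottom-right corner of the main video could be:
-
ffmpeg -i main.mp4 -i logo.png -filter_complex "overlay=x=W-w-10:y=H-h-10" output.mp4
-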
-
-
9.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8
.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0
.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0
.
-
-
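-
A hypothetical invocation (not from the original documentation; values are arbitrary but within the documented ranges) could be:
-
owdenoise=depth=10:luma_strength=3:chroma_strength=3
-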
-
-
-
9.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
9.67.1 Examples# TOC
-
-
-
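-
As an illustrative sketch (not from the original documentation), padding the input to a 16:9 canvas and centering it can be expressed with the documented constants:
-
pad=w=ih*16/9:h=ih:x=(ow-iw)/2:y=(oh-ih)/2:color=black
-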
-
-
9.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H
with which perspective will remain unchanged.
-If the sense
option is set to source
, then the specified points will be sent
-to the corners of the destination. If the sense
option is set to destination
,
-then the corners of the source will be sent to the specified coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
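-
A hypothetical example (not from the original documentation; the coordinates are arbitrary) that maps two points just inside the top edge of the source to the top corners of the output would be:
-
perspective=x0=W*0.05:y0=H*0.05:x1=W*0.95:y1=H*0.05:sense=source
-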
-
-
-
9.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
9.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
-
-
9.71 pp# TOC
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl
).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255
.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1)
filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1)
filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1)
filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a
)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a
)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
-
-
-
-
-
9.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
pp=hb/vb/dr/al
-
- Apply default filters without brightness/contrast correction:
-
pp=de/-al
-
- Apply default filters and temporal denoiser:
-
pp=default/tmpnoise|1|2|3
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
pp=hb|y/vb|a
-
-
-
-
9.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp = 6 with 7 point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
-
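-
An illustrative invocation (not part of the original documentation; the values are arbitrary) forcing a constant QP and soft thresholding could be:
-
pp7=qp=6:mode=soft
-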
-
-
9.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes in input two input videos, the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file written when stats_file is selected contains a sequence of
-key/value pairs of the form key:value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg. The PSNR of each individual frame
-is stored in stats.log.
-
-
-
9.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup, use fps=24000/1001
if the input frame rate is 29.97fps,
-fps=24
for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0
.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use chroma plane instead of the default luma plane
-for doing filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
-
-
9.75 qp# TOC
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
9.75.1 Examples# TOC
-
-
- Some equation like:
-
qp=2+2*sin(PI*qp)
-
-
-
9.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
-
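-
A minimal hypothetical example (logo_mask.png is a placeholder; the bitmap must match the video dimensions as described above, and this example is not from the original documentation):
-
removelogo=f=logo_mask.png
-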
-
9.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
9.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
rotate=PI/6
-
- Rotate the input by PI/6 radians counter-clockwise:
-
rotate=-PI/6
-
- Rotate the input by 45 degrees clockwise:
-
rotate=45*PI/180
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
rotate=PI/3+2*PI*t/T
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
9.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
9.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
-
9.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
9.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used.
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is different from specifying -1 for w
-or h ; you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar.
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" ohsub is 2 and ovsub is 1.
-
-
-
-
-
9.79.2 Examples# TOC
-
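-A few illustrative uses of the options described above (the sizes are
-arbitrary examples):
-
- Scale the input video to a fixed size of 200x100:
-
-
scale=w=200:h=100
-
- Halve the size of the input video:
-
-
scale=w=iw/2:h=ih/2
-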
-
-
-
-
9.80 separatefields# TOC
-
-
The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half-height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses the field-dominance information in the frame to decide
-which of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields filter.
-
-
-
9.81 setdar, setsar# TOC
-
-
The setdar
filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar
filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar
only), sar (setsar
only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, the value "0" is assumed.
-In case the form "num :den " is used, the : character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
9.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-setdar=dar=1.77777
-
-
- To change the sample aspect ratio to 10:11, specify:
-
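-Following the same form as the setdar examples above:
-
setsar=sar=10/11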
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
9.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield
filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder
or yadif
).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
9.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType
enum and of
-the av_get_picture_type_char
function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
-
9.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
9.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specifies an additional form of image analysis.
-out outputs the video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outlier pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition. Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
9.85.1 Examples# TOC
-
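- As an illustration of the out and color options above, highlight
-pixels that fall outside of the legal broadcast range in red:
-
-
signalstats=out=brng:color=red
-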
-
-
-
-
9.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
-
-
9.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filters accept the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
9.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
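-Using the in and out values listed above:
-
stereo3d=sbsl:aybd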
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
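-With the same in and out ordering as above:
-
stereo3d=abl:sbsr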
-
-
-
-
9.88 spp# TOC
-
-
-Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or - in the case of quality level 6 - all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0, the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is 3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1. Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0 (not enabled).
-
-
-
-
-
9.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass
. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles
filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles
filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
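-Since an unkeyed first value is interpreted as the filename, this is simply:
-
subtitles=sub.srt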
-
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
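-Again, only the filename is needed:
-
subtitles=video.mkv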
-
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
9.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
9.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
9.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-The default value is top.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
-
-
9.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100.
-
-
-
-
-Since the filter keeps track of the whole frame sequence, a bigger n
-value will result in higher memory usage, so a high value is not recommended.
-
-
-
9.93.1 Examples# TOC
-
-
- Extract one picture each 50 frames:
-
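-Using the n option described above:
-
thumbnail=n=50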
-
- Complete example of a thumbnail creation with ffmpeg
:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
9.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w xh . The default value is 0, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
9.94.1 Examples# TOC
-
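- As an illustration of the options above, lay out five successive frames
-in a 3x2 grid with a small border (the values are arbitrary examples):
-
-
tile=3x2:nb_frames=5:padding=7:margin=2
-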
-
-
-
-
9.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
-
-
-
9.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated, the passthrough
option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
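-Using positional values for the same options:
-
transpose=1:portrait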
-
-
-
9.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
-
-
-
-
-
9.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it; a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it; a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
9.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
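-For example, passing the options positionally (lx:ly:la:cx:cy:ca):
-
unsharp=7:7:-2:7:7:-2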
-
-
-
-
9.99 uspp# TOC
-
-
-Apply an ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or - in the case of quality level 8 - all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is 3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
-
-
9.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
9.100.1 Examples# TOC
-
-
- Use default values:
-
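-Invoking the filter with no options writes the transforms to transforms.trf :
-
vidstabdetect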
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
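-Enabling the show option described above is enough, for example:
-
vidstabdetect=show=1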
-
- Analyze a video with medium shakiness using ffmpeg
:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
9.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
-
9.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smoothen the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0. Default value is 0.
-
-Use also tripod
option of vidstabdetect .
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
9.101.2 Examples# TOC
-
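- A typical pass 2 invocation with ffmpeg , followed by the recommended
-unsharp filter (the file names are placeholders):
-
-
ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
-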
-
-
-
-
9.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
9.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2]
range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2"
and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1.
-
-
-
-
-
9.103.1 Expressions# TOC
-
-
The angle , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
9.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
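-For example, a wider lens angle than the default gives a stronger effect:
-
vignette=PI/4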
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
9.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accepts one of the following values:
-
-
-‘all ’
-Deinterlace all frames.
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
-
-
-
9.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for
-3xBR and 4 for 4xBR.
-Default is 3.
-
-
-
-
-
9.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame
, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field
, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accepts one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all.
-
-
-
-
-
9.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets for how many frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of the last output frame of the previous input frame, or 0 when there
-was no such frame yet (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-number of output frames created for previous input frame
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
9.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
10 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
10.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
10.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename , and pattern options. If such options are
-not specified an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
10.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
10.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
-
-
10.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
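-For instance, selecting the test by name with the t option:
-
mptestsrc=t=dc_luma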
-
will generate a "dc_luma" test pattern.
-
-
-
10.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name of the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
10.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number ranging from 0 to 1; it defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid; it must be an integer
-between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS/BNB",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bit integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, while the low order bits specify
-the rule for giving birth to new cells. Higher order bits encode a
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9 specifies a stay alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 neighbor alive
-cells, and will give birth to a new cell if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
10.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
life=rule=S14/B34
-
- Full example with slow death effect (mold) using ffplay
:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
10.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
The color
source provides a uniformly colored input.
-
-
The haldclutsrc
source provides an identity Hald CLUT. See also
-haldclut filter.
-
-
The nullsrc
source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
The rgbtestsrc
source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
The smptebars
source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
The smptehdbars
source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
The testsrc
source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as an identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc
can be used. The
-following command generates noise in the luminance plane by employing
-the geq
filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
-
-
10.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
-
-
11 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
11.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
11.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
12 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
12.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
The filter is used to measure the difference between channels of stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight (or nearly straight) horizontal line appears, this
-indicates that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
-Allowed range is [0, 255]
.
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
-Allowed range is [0, 255]
.
-
-
-zoom
-Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
-
-
-
-
-
12.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
12.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
12.2.1 Examples# TOC
-
-
-
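-
- Concatenate three segments, each with one video and one audio stream
-(an illustrative sketch; the file names are hypothetical):
-
-
ffmpeg -i part1.mkv -i part2.mkv -i part3.mkv -filter_complex \
  '[0:v][0:a][1:v][1:a][2:v][2:a] concat=n=3:v=1:a=1 [v][a]' \
  -map '[v]' -map '[a]' output.mkv
-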
-
-
12.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M
), Short-term loudness (S
),
-Integrated loudness (I
) and Loudness Range (LRA
).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is 0
.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9
. Common values are 9
and
-18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value between this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1
, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
-
-Default is 0
.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the highest sample value. It logs a message
-for sample-peak (identified by SPK
).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak
-(identified by TPK) and true-peak per frame (identified by FTPK).
-This mode requires a build with libswresample
.
-
-
-
-
-
-
-
-
12.3.1 Examples# TOC
-
-
- Real-time graph using ffplay
, with a EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg
:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
12.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave
works with video inputs, ainterleave
with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
-Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-
In order to submit one frame to output, these filters need to enqueue
-at least one frame for each input, so they cannot work in case one
-input is not yet terminated and will not receive incoming frames.
-
-
-For example consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to output until that input sends an end-of-stream signal.
-
-
Also, depending on inputs synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
12.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg
:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
12.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
-These filters are mainly aimed at developers to test the direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode; it must be an integer between
-0 and UINT32_MAX. If not specified, or if explicitly set to
--1, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
-Note: in case of an auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
-
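-
-For example, to force every frame read-only before the next filter in
-order to exercise its direct (in-place) path (an illustrative sketch;
-the chosen downstream filter is arbitrary):
-
-
ffmpeg -i INPUT -vf "perms=mode=ro,hue=s=0" -f null -
-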
-
12.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1
, assuming that the input index starts from 0.
-
-For example a value of 1.2
corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1
, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
12.6.1 Examples# TOC
-
-
-
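-
- Select only I-frames (an illustrative expression):
-
-
select='eq(pict_type\,I)'
-
- Send frames whose scene-change score exceeds 0.4 to the output,
-dropping the others:
-
-
select='gt(scene\,0.4)'
-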
-
-
12.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd
must be inserted between two video filters,
-asendcmd
must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
12.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
START [-END ] COMMANDS ;
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater or equal to START and is lesser than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter]
is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespaces, or
-sequences of characters starting with # until the end of line,
-are ignored and can be used to add comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
12.7.2 Examples# TOC
-
-
-
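-
- Change the tempo of a following atempo filter to 1.5 when the
-timestamp reaches 4 seconds (an illustrative sketch based on the
-interval syntax above):
-
-
asendcmd=c='4.0 atempo tempo 1.5',atempo
-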
-
-
12.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts
works on video frames, asetpts
on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
12.8.1 Examples# TOC
-
-
- Start counting PTS from zero:
setpts=PTS-STARTPTS
-
- Apply fast motion effect:
setpts=0.5*PTS
-
- Apply slow motion effect:
setpts=2.0*PTS
-
- Set fixed rate of 25 frames per second:
setpts=N/(25*TB)
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
-
- Apply an offset of 10 seconds to the input PTS:
setpts=PTS+10/TB
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
-
- Generate timestamps by counting samples:
asetpts=N/SR/TB
-
-
-
-
-
12.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
12.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
settb=expr=1/25
-
- Set the timebase to 1/10:
settb=expr=0.1
-
- Set the timebase to 1001/1000:
settb=1+0.001
-
- Set the timebase to 2*intb:
settb=2*intb
-
- Set the default timebase value:
settb=AVTB
-
-
-
-
12.10 showcqt# TOC
-
Convert input audio to a video output representing
-frequency spectrum logarithmically (using constant Q transform with
-Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16
.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc)
.
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies, there is a trade-off between
-accuracy in the time domain and the frequency domain. If timeclamp is lower,
-events in the time domain are represented more accurately (such as a fast bass drum),
-otherwise events in the frequency domain are represented more accurately
-(such as a bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, transform is
-more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
-Default value is 1.0
.
-
-
-gamma
-Specify gamma. Lower gamma makes the spectrum more contrasted, higher gamma
-gives the spectrum more range. Acceptable value is [1.0, 7.0].
-Default value is 3.0
.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify font color expression. This is an arithmetic expression that should return
-an integer value of the form 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25
.
-
-
-count
-Specify the number of transforms per frame, so there are fps*count transforms
-per second. Note that audio data rate must be divisible by fps*count.
-Default value is 6
.
-
-
-
-
-
-
12.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
12.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512
.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace
.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set saturation modifier for displayed colors. Negative values provide
-alternative color scheme. 0
is no saturation at all.
-Saturation must be in [-10.0, 10.0] range.
-Default value is 1
.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann
.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
12.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
-
-
-
12.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point
.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
12.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
-
-
-
12.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit
works with audio input, split
with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
12.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg
:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
-
-
-
12.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq
and azmq
work as a pass-through filters. zmq
-must be inserted between two video filters, azmq
between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq
.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq
and azmq
filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
TARGET COMMAND [ARG]
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
12.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
13 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
13.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
13.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod
, so the numerical value may be suffixed by an IS
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
13.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
-
-
-
-
-
14 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavfilter
-
-
-
-
15 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-formats.html b/Externals/ffmpeg/shared/doc/ffmpeg-formats.html
deleted file mode 100644
index 1350b5caba..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-formats.html
+++ /dev/null
@@ -1,2311 +0,0 @@
-
-
-
-
-
-
- FFmpeg Formats Documentation
-
-
-
-
-
-
-
-
- FFmpeg Formats Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the supported formats (muxers and demuxers)
-provided by the libavformat library.
-
-
-
-
2 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value will allow more information to be
-detected in case it is dispersed into the stream, but will increase
-latency. Must be an integer not less than 32. It is 5000000 by default.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value will allow more accurate information to be detected, but will
-increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non-compliances as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitles desynching and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set number of bytes to skip before reading header and frames if set to 1.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-A comma-separated list of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-Stream parameters.
-For example to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
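-
-For example, to generate missing PTS values while remuxing a transport
-stream (an illustrative invocation; the file names are hypothetical):
-
-
ffmpeg -fflags +genpts -i input.ts -c copy output.mkv
-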
-
-
-
2.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
-
-
3 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
3.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
-
3.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers, but the PNG signature, up to (but not including) the first
-fcTL chunk are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
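-
-For example, to convert an APNG animation while ignoring its loop
-setting (an illustrative invocation; the file names are hypothetical):
-
-
ffmpeg -ignore_loop 1 -i input.apng output.mkv
-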
-
-
3.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
3.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
3.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was at its default -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order-mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the string with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
-
-
3.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
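-
-As an illustration (the file names are hypothetical), a minimal script
-could look like:
-
-
ffconcat version 1.0
file intro.mp4
file main.mp4
-
-
-and can then be read with:
-
-
ffmpeg -f concat -i list.ffconcat -c copy output.mp4
-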
-
-
3.5 flv# TOC
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
3.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-tracks meta data entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
-
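-
-For example, to extract the second track of a game-music file
-(an illustrative invocation; the file name is hypothetical):
-
-
ffmpeg -track_index 1 -i music.nsf -t 30 track2.wav
-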
-
3.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
3.8 gif# TOC
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
3.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%0Nd" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern must not necessarily contain "%d" or
-"%0Nd", for example to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set the frame timestamp to the modification time of the image file. Note
-that monotonicity of timestamps is not guaranteed: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
3.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
3.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
-
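For example, a minimal sketch that remuxes a transport stream while leaving
-the teletext packet timestamps untouched (input.ts is a hypothetical input):
-
-
# input.ts is assumed to contain a teletext stream
ffmpeg -fix_teletext_pts 0 -i input.ts -c copy out.ts
-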
-
-
3.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
3.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
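For example, a script saved to a hypothetical file session.sbg could be
-played by forcing the sbg demuxer:
-
-
# session.sbg is assumed to contain a script like the one above
ffplay -f sbg session.sbg
-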
-
-
3.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
4 Muxers# TOC
-
-
Muxers are configured elements in FFmpeg which allow writing
-multimedia streams to a particular type of file.
-
-
When you configure your FFmpeg build, all the supported muxers
-are enabled by default. You can list all available muxers using the
-configure option --list-muxers
.
-
-
You can disable all the muxers with the configure option
---disable-muxers
and selectively enable / disable single muxers
-with the options --enable-muxer=MUXER
/
---disable-muxer=MUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled muxers.
-
-
A description of some of the currently available muxers follows.
-
-
-
4.1 aiff# TOC
-
-
Audio Interchange File Format muxer.
-
-
-
4.1.1 Options# TOC
-
-
It accepts the following options:
-
-
-write_id3v2
-Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
-
-
-id3v2_version
-Select ID3v2 version to write. Currently only versions 3 and 4 (aka.
-ID3v2.3 and ID3v2.4) are supported. The default is version 4.
-
-
-
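For example, a sketch of writing an AIFF file with ID3v2.3 tags enabled
-(INPUT stands for any audio input):
-
-
# INPUT is a placeholder for an arbitrary audio source
ffmpeg -i INPUT -write_id3v2 1 -id3v2_version 3 out.aiff
-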
-
-
-
4.2 crc# TOC
-
CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a single line of the form:
-CRC=0xCRC , where CRC is a hexadecimal number 0-padded to
-8 digits containing the CRC for all the decoded input frames.
-
-
See also the framecrc muxer.
-
-
-
4.2.1 Examples# TOC
-
-
For example to compute the CRC of the input, and store it in the file
-out.crc :
-
-
ffmpeg -i INPUT -f crc out.crc
-
-
-
You can print the CRC to stdout with the command:
-
-
ffmpeg -i INPUT -f crc -
-
-
-
You can select the output format of each frame with ffmpeg
by
-specifying the audio and video codec and format. For example to
-compute the CRC of the input audio converted to PCM unsigned 8-bit
-and the input video converted to MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
-
-
-
-
4.3 framecrc# TOC
-
-
Per-packet CRC (Cyclic Redundancy Check) testing format.
-
-
This muxer computes and prints the Adler-32 CRC for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-CRC.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , 0xCRC
-
-
-
CRC is a hexadecimal number 0-padded to 8 digits containing the
-CRC of the packet.
-
-
-
4.3.1 Examples# TOC
-
-
For example to compute the CRC of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.crc :
-
-
ffmpeg -i INPUT -f framecrc out.crc
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framecrc -
-
-
-
With ffmpeg
, you can select the output format to which the
-audio and video frames are encoded before computing the CRC for each
-packet by specifying the audio and video codec. For example, to
-compute the CRC of each decoded input audio frame converted to PCM
-unsigned 8-bit and of each decoded input video frame converted to
-MPEG-2 video, use the command:
-
-
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
-
-
-
See also the crc muxer.
-
-
-
4.4 framemd5# TOC
-
-
Per-packet MD5 testing format.
-
-
This muxer computes and prints the MD5 hash for each audio
-and video packet. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a line for each audio and video
-packet of the form:
-
-
stream_index , packet_dts , packet_pts , packet_duration , packet_size , MD5
-
-
-
MD5 is a hexadecimal number representing the computed MD5 hash
-for the packet.
-
-
-
4.4.1 Examples# TOC
-
-
For example to compute the MD5 of the audio and video frames in
-INPUT , converted to raw audio and video packets, and store it
-in the file out.md5 :
-
-
ffmpeg -i INPUT -f framemd5 out.md5
-
-
-
To print the information to stdout, use the command:
-
-
ffmpeg -i INPUT -f framemd5 -
-
-
-
See also the md5 muxer.
-
-
-
4.5 gif# TOC
-
Animated GIF muxer.
-
-
It accepts the following options:
-
-
-loop
-Set the number of times to loop the output. Use -1
for no loop, 0
-for looping indefinitely (default).
-
-
-final_delay
-Force the delay (expressed in centiseconds) after the last frame. Each frame
-ends with a delay until the next frame. The default is -1
, which is a
-special value to tell the muxer to re-use the previous delay. In case of a
-loop, you might want to customize this value to mark a pause for instance.
-
-
-
-
For example, to encode a gif looping 10 times, with a 5 seconds delay between
-the loops:
-
-
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
-
-
-
Note 1: if you wish to extract the frames in separate GIF files, you need to
-force the image2 muxer:
-
-
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
-
-
-
Note 2: the GIF format has a very small time base: the delay between two frames
-cannot be smaller than one centisecond.
-
-
-
4.6 hls# TOC
-
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
-the HTTP Live Streaming (HLS) specification.
-
-
It creates a playlist file, and one or more segment files. The output filename
-specifies the playlist filename.
-
-
By default, the muxer creates a file for each segment produced. These files
-have the same name as the playlist, followed by a sequential number and a
-.ts extension.
-
-
For example, to convert an input file with ffmpeg
:
-
-
ffmpeg -i in.nut out.m3u8
-
-
This example will produce the playlist, out.m3u8 , and segment files:
-out0.ts , out1.ts , out2.ts , etc.
-
-
See also the segment muxer, which provides a more generic and
-flexible implementation of a segmenter, and can be used to perform HLS
-segmentation.
-
-
-
4.6.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-hls_time seconds
-Set the segment length in seconds. Default value is 2.
-
-
-hls_list_size size
-Set the maximum number of playlist entries. If set to 0 the list file
-will contain all the segments. Default value is 5.
-
-
-hls_ts_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing :
special characters must be
-escaped.
-
-
-hls_wrap wrap
-Set the number after which the segment filename number (the number
-specified in each segment file) wraps. If set to 0 the number will
-never be wrapped. Default value is 0.
-
-This option is useful to avoid filling the disk with many segment
-files, and limits the maximum number of segment files written to disk
-to wrap .
-
-
-start_number number
-Start the playlist sequence number from number . Default value is
-0.
-
-
-hls_allow_cache allowcache
-Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
-
-
-hls_base_url baseurl
-Append baseurl to every entry in the playlist.
-Useful to generate playlists with absolute paths.
-
-Note that the playlist sequence number must be unique for each segment
-and it is not to be confused with the segment filename sequence number
-which can be cyclic, for example if the wrap option is
-specified.
-
-
-hls_segment_filename filename
-Set the segment filename. Unless hls_flags single_file is set filename
-is used as a string format with the segment number:
-
-
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
-
-This example will produce the playlist, out.m3u8 , and segment files:
-file000.ts , file001.ts , file002.ts , etc.
-
-
-hls_flags single_file
-If this flag is set, the muxer will store all segments in a single MPEG-TS
-file, and will use byte ranges in the playlist. HLS playlists generated
-this way will have the version number 4.
-For example:
-
-
ffmpeg -i in.nut -hls_flags single_file out.m3u8
-
-Will produce the playlist, out.m3u8 , and a single segment file,
-out.ts .
-
-
-hls_flags delete_segments
-Segment files removed from the playlist are deleted after a period of time
-equal to the duration of the segment plus the duration of the playlist.
-
-
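For example, a minimal sketch that encodes a hypothetical input.mp4 into
-4-second segments and keeps every segment in the playlist:
-
-
# input.mp4 is a hypothetical input file
ffmpeg -i input.mp4 -c:v libx264 -c:a aac -strict experimental -hls_time 4 -hls_list_size 0 out.m3u8
-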
-
-
-
4.7 ico# TOC
-
ICO file muxer.
-
-
Microsoft’s icon file format (ICO) has some strict limitations that should be noted:
-
-
- Size cannot exceed 256 pixels in any dimension
-
- Only BMP and PNG images can be stored
-
- If a BMP image is used, it must be one of the following pixel formats:
-
-
BMP Bit Depth FFmpeg Pixel Format
-1bit pal8
-4bit pal8
-8bit pal8
-16bit rgb555le
-24bit bgr24
-32bit bgra
-
-
- If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
-
- If a PNG image is used, it must use the rgba pixel format
-
-
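For example, a sketch that stores a hypothetical input.png as a 64x64 icon
-using the PNG codec and the required rgba pixel format:
-
-
# input.png is a hypothetical source image
ffmpeg -i input.png -c:v png -pix_fmt rgba -s 64x64 favicon.ico
-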
-
-
4.8 image2# TOC
-
-
Image file muxer.
-
-
The image file muxer writes video frames to image files.
-
-
The output filenames are specified by a pattern, which can be used to
-produce sequentially numbered series of files.
-The pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a numbering in
-the filenames. If the form "%0Nd" is used, the string
-representing the number in each filename is 0-padded to N
-digits. The literal character ’%’ can be specified in the pattern with
-the string "%%".
-
-
If the pattern contains "%d" or "%0Nd", the first filename of
-the file list specified will contain the number 1; all the following
-numbers will be sequential.
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the image files to write.
-
-
For example the pattern "img-%03d.bmp" will specify a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.
-The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
-form img%-1.jpg , img%-2.jpg , ..., img%-10.jpg ,
-etc.
-
-
-
4.8.1 Examples# TOC
-
-
The following example shows how to use ffmpeg
for creating a
-sequence of files img-001.jpeg , img-002.jpeg , ...,
-taking one image every second from the input video:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
-
-
-
Note that with ffmpeg
, if the format is not specified with the
--f
option and the output filename specifies an image file
-format, the image2 muxer is automatically selected, so the previous
-command can be written as:
-
-
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
-
-
-
Note also that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to create a single image file
-img.jpeg from the input video you can employ the command:
-
-
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
-
-
-
The strftime option allows you to expand the filename with
-date and time information. Check the documentation of
-the strftime()
function for the syntax.
-
-
For example to generate image files from the strftime()
-"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg
command
-can be used:
-
-
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
-
-
-
-
4.8.2 Options# TOC
-
-
-start_number
-Start the sequence from the specified number. Default value is 1. Must
-be a non-negative number.
-
-
-update
-If set to 1, the filename will always be interpreted as just a
-filename, not a pattern, and the corresponding file will be continuously
-overwritten with new images. Default value is 0.
-
-
-strftime
-If set to 1, expand the filename with date and time information from
-strftime()
. Default value is 0.
-
-
-
-
The image muxer supports the .Y.U.V image file format. This format is
-special in that each image frame consists of three files, one for
-each of the YUV420P components. To read or write this image file format,
-specify the name of the ’.Y’ file. The muxer will automatically open the
-’.U’ and ’.V’ files as required.
-
-
-
4.9 matroska# TOC
-
-
Matroska container muxer.
-
-
This muxer implements the matroska and webm container specs.
-
-
-
4.9.1 Metadata# TOC
-
-
The recognized metadata settings in this muxer are:
-
-
-title
-Set title name provided to a single track.
-
-
-language
-Specify the language of the track in the Matroska languages form.
-
-The language can be either the 3 letters bibliographic ISO-639-2 (ISO
-639-2/B) form (like "fre" for French), or a language code mixed with a
-country code for specialities in languages (like "fre-ca" for Canadian
-French).
-
-
-stereo_mode
-Set stereo 3D video layout of two views in a single video track.
-
-The following values are recognized:
-
-‘mono ’
-video is not stereo
-
-‘left_right ’
-Both views are arranged side by side, Left-eye view is on the left
-
-‘bottom_top ’
-Both views are arranged in top-bottom orientation, Left-eye view is at bottom
-
-‘top_bottom ’
-Both views are arranged in top-bottom orientation, Left-eye view is on top
-
-‘checkerboard_rl ’
-Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first
-
-‘checkerboard_lr ’
-Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first
-
-‘row_interleaved_rl ’
-Each view is constituted by a row based interleaving, Right-eye view is first row
-
-‘row_interleaved_lr ’
-Each view is constituted by a row based interleaving, Left-eye view is first row
-
-‘col_interleaved_rl ’
-Both views are arranged in a column based interleaving manner, Right-eye view is first column
-
-‘col_interleaved_lr ’
-Both views are arranged in a column based interleaving manner, Left-eye view is first column
-
-‘anaglyph_cyan_red ’
-All frames are in anaglyph format viewable through red-cyan filters
-
-‘right_left ’
-Both views are arranged side by side, Right-eye view is on the left
-
-‘anaglyph_green_magenta ’
-All frames are in anaglyph format viewable through green-magenta filters
-
-‘block_lr ’
-Both eyes laced in one Block, Left-eye view is first
-
-‘block_rl ’
-Both eyes laced in one Block, Right-eye view is first
-
-
-
-
-
-
For example a 3D WebM clip can be created using the following command line:
-
-
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
-
-
-
-
4.9.2 Options# TOC
-
-
This muxer supports the following options:
-
-
-reserve_index_space
-By default, this muxer writes the index for seeking (called cues in Matroska
-terms) at the end of the file, because it cannot know in advance how much space
-to leave for the index at the beginning of the file. However for some use cases
-– e.g. streaming where seeking is possible but slow – it is useful to put the
-index at the beginning of the file.
-
-If this option is set to a non-zero value, the muxer will reserve a given amount
-of space in the file header and then try to write the cues there when the muxing
-finishes. If the available space does not suffice, muxing will fail. A safe size
-for most use cases should be about 50kB per hour of video.
-
-Note that cues are only written if the output is seekable and this option will
-have no effect if it is not.
-
-
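For example, a sketch that remuxes a hypothetical input into Matroska while
-reserving roughly 100 kB at the start of the file for the cues:
-
-
# INPUT is a placeholder; 100000 bytes is an arbitrary reservation
ffmpeg -i INPUT -c copy -reserve_index_space 100000 out.mkv
-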
-
-
-
4.10 md5# TOC
-
-
MD5 testing format.
-
-
This muxer computes and prints the MD5 hash of all the input audio
-and video frames. By default audio frames are converted to signed
-16-bit raw audio and video frames to raw video before computing the
-hash.
-
-
The output of the muxer consists of a single line of the form:
-MD5=MD5 , where MD5 is a hexadecimal number representing
-the computed MD5 hash.
-
-
For example to compute the MD5 hash of the input converted to raw
-audio and video, and store it in the file out.md5 :
-
-
ffmpeg -i INPUT -f md5 out.md5
-
-
-
You can print the MD5 to stdout with the command:
-
-
ffmpeg -i INPUT -f md5 -
-
-
-
See also the framemd5 muxer.
-
-
-
4.11 mov, mp4, ismv# TOC
-
-
MOV/MP4/ISMV (Smooth Streaming) muxer.
-
-
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
-file has all the metadata about all packets stored in one location
-(written at the end of the file, it can be moved to the start for
-better playback by adding faststart to the movflags , or
-using the qt-faststart
tool). A fragmented
-file consists of a number of fragments, where packets and metadata
-about these packets are stored together. Writing a fragmented
-file has the advantage that the file is decodable even if the
-writing is interrupted (while a normal MOV/MP4 is undecodable if
-it is not properly finished), and it requires less memory when writing
-very long files (since writing normal MOV/MP4 files stores info about
-every single packet in memory until the file is closed). The downside
-is that it is less compatible with other applications.
-
-
-
4.11.1 Options# TOC
-
-
Fragmentation is enabled by setting one of the AVOptions that define
-how to cut the file into fragments:
-
-
--moov_size bytes
-Reserves space for the moov atom at the beginning of the file instead of placing the
-moov atom at the end. If the space reserved is insufficient, muxing will fail.
-
--movflags frag_keyframe
-Start a new fragment at each video keyframe.
-
--frag_duration duration
-Create fragments that are duration microseconds long.
-
--frag_size size
-Create fragments that contain up to size bytes of payload data.
-
--movflags frag_custom
-Allow the caller to manually choose when to cut fragments, by
-calling av_write_frame(ctx, NULL)
to write a fragment with
-the packets written so far. (This is only useful with other
-applications integrating libavformat, not from ffmpeg
.)
-
--min_frag_duration duration
-Don’t create fragments that are shorter than duration microseconds long.
-
-
-
-
If more than one condition is specified, fragments are cut when
-one of the specified conditions is fulfilled. The exception to this is
--min_frag_duration
, which has to be fulfilled for any of the other
-conditions to apply.
-
-
Additionally, the way the output file is written can be adjusted
-through a few other options:
-
-
--movflags empty_moov
-Write an initial moov atom directly at the start of the file, without
-describing any samples in it. Generally, an mdat/moov pair is written
-at the start of the file, as a normal MOV/MP4 file, containing only
-a short portion of the file. With this option set, there is no initial
-mdat atom, and the moov atom only describes the tracks but has
-a zero duration.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags separate_moof
-Write a separate moof (movie fragment) atom for each track. Normally,
-packets for all tracks are written in a moof atom (which is slightly
-more efficient), but with this option set, the muxer writes one moof/mdat
-pair for each track, making it easier to separate tracks.
-
-This option is implicitly set when writing ismv (Smooth Streaming) files.
-
--movflags faststart
-Run a second pass moving the index (moov atom) to the beginning of the file.
-This operation can take a while, and will not work in various situations such
-as fragmented output, thus it is not enabled by default.
-
--movflags rtphint
-Add RTP hinting tracks to the output file.
-
--movflags disable_chpl
-Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
-and a QuickTime chapter track are written to the file. With this option
-set, only the QuickTime chapter track will be written. Nero chapters can
-cause failures when the file is reprocessed with certain tagging programs, like
-mp3Tag 2.61a and iTunes 11.3; most likely other versions are affected as well.
-
--movflags omit_tfhd_offset
-Do not write any absolute base_data_offset in tfhd atoms. This avoids
-tying fragments to absolute byte positions in the file/streams.
-
--movflags default_base_moof
-Similarly to the omit_tfhd_offset, this flag avoids writing the
-absolute base_data_offset field in tfhd atoms, but does so by using
-the new default-base-is-moof flag instead. This flag is new from
-14496-12:2012. This may make the fragments easier to parse in certain
-circumstances (avoiding basing track fragment location calculations
-on the implicit end of the previous track fragment).
-
-
-
-
-
4.11.2 Example# TOC
-
-
Smooth Streaming content can be pushed in real time to a publishing
-point on IIS with this muxer. Example:
-
-
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
-
-
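A fragmented MP4 can also be written to a local file; for instance, the
-following sketch remuxes a hypothetical input.mp4, cutting a fragment at
-every video keyframe:
-
-
# input.mp4 is a hypothetical input file
ffmpeg -i input.mp4 -c copy -movflags frag_keyframe+empty_moov fragmented.mp4
-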
-
-
4.12 mp3# TOC
-
-
The MP3 muxer writes a raw MP3 stream with the following optional features:
-
- An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
-2.4 are supported, the id3v2_version
private option controls which one is
-used (3 or 4). Setting id3v2_version
to 0 disables the ID3v2 header
-completely.
-
-The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
-The pictures are supplied to the muxer in form of a video stream with a single
-packet. There can be any number of those streams, each will correspond to a
-single APIC frame. The stream metadata tags title and comment map
-to APIC description and picture type respectively. See
-http://id3.org/id3v2.4.0-frames for allowed picture types.
-
-Note that the APIC frames must be written at the beginning, so the muxer will
-buffer the audio frames until it gets all the pictures. It is therefore advised
-to provide the pictures as soon as possible to avoid excessive buffering.
-
- A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
-default, but will be written only if the output is seekable. The
-write_xing
private option can be used to disable it. The frame contains
-various information that may be useful to the decoder, like the audio duration
-or encoder delay.
-
- A legacy ID3v1 tag at the end of the file (disabled by default). It may be
-enabled with the write_id3v1
private option, but as its capabilities are
-very limited, its usage is not recommended.
-
-
-
Examples:
-
-
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
-
-
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
-
-
-
To attach a picture to an mp3 file select both the audio and the picture stream
-with map
:
-
-
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
--metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
-
-
-
Write a "clean" MP3 without any extra features:
-
-
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
-
-
-
-
4.13 mpegts# TOC
-
-
MPEG transport stream muxer.
-
-
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
-
-
The recognized metadata settings in mpegts muxer are service_provider
-and service_name
. If they are not set the default for
-service_provider
is "FFmpeg" and the default for
-service_name
is "Service01".
-
-
-
4.13.1 Options# TOC
-
-
The muxer options are:
-
-
--mpegts_original_network_id number
-Set the original_network_id (default 0x0001). This is the unique identifier
-of a network in DVB. Its main use is in the unique identification of a
-service through the path Original_Network_ID, Transport_Stream_ID.
-
--mpegts_transport_stream_id number
-Set the transport_stream_id (default 0x0001). This identifies a
-transponder in DVB.
-
--mpegts_service_id number
-Set the service_id (default 0x0001) also known as program in DVB.
-
--mpegts_pmt_start_pid number
-Set the first PID for PMT (default 0x1000, max 0x1f00).
-
--mpegts_start_pid number
-Set the first PID for data packets (default 0x0100, max 0x0f00).
-
--mpegts_m2ts_mode number
-Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
-
--muxrate number
-Set a constant muxrate (default VBR).
-
--pcr_period number
-Override the default PCR retransmission time (default 20ms), ignored
-if variable muxrate is selected.
-
--pes_payload_size number
-Set minimum PES packet payload in bytes.
-
--mpegts_flags flags
-Set flags (see below).
-
--mpegts_copyts number
-Preserve original timestamps, if value is set to 1. Default value is -1, which
-results in shifting timestamps so that they start from 0.
-
--tables_version number
-Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively).
-This option allows updating the stream structure so that a standard consumer may
-detect the change. To do so, reopen output AVFormatContext (in case of API
-usage) or restart ffmpeg instance, cyclically changing tables_version value:
-
-
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
-ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
-ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
-...
-
-
-
-
-
Option mpegts_flags may take a set of such flags:
-
-
-resend_headers
-Reemit PAT/PMT before writing the next packet.
-
-latm
-Use LATM packetization for AAC.
-
-
-
-
-
4.13.2 Example# TOC
-
-
-
ffmpeg -i file.mpg -c copy \
- -mpegts_original_network_id 0x1122 \
- -mpegts_transport_stream_id 0x3344 \
- -mpegts_service_id 0x5566 \
- -mpegts_pmt_start_pid 0x1500 \
- -mpegts_start_pid 0x150 \
- -metadata service_provider="Some provider" \
- -metadata service_name="Some Channel" \
- -y out.ts
-
-
-
-
4.14 null# TOC
-
-
Null muxer.
-
-
This muxer does not generate any output file; it is mainly useful for
-testing or benchmarking purposes.
-
-
For example to benchmark decoding with ffmpeg
you can use the
-command:
-
-
ffmpeg -benchmark -i INPUT -f null out.null
-
-
-
Note that the above command does not read or write the out.null
-file, but specifying the output file is required by the ffmpeg
-syntax.
-
-
Alternatively you can write the command as:
-
-
ffmpeg -benchmark -i INPUT -f null -
-
-
-
-
4.15 nut# TOC
-
-
--syncpoints flags
-Change the syncpoint usage in nut:
-
-default use the normal low-overhead seeking aids.
-none do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
-Use of this option is not recommended, as the resulting files are very damage
- sensitive and seeking is not possible. Also in general the overhead from
- syncpoints is negligible. Note, -write_index 0
- can be used to disable
- all growing data tables, allowing muxing of endless streams with limited memory
- and without these disadvantages.
-
-timestamped extend the syncpoint with a wallclock field.
-
-The none and timestamped flags are experimental.
-
--write_index bool
-Write index at the end, the default is to write an index.
-
-
-
-
-
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
-
-
-
-
4.16 ogg# TOC
-
-
Ogg container muxer.
-
-
--page_duration duration
-Preferred page duration, in microseconds. The muxer will attempt to create
-pages that are approximately duration microseconds long. This allows the
-user to compromise between seek granularity and container overhead. The default
-is 1 second. A value of 0 will fill all segments, making pages as large as
-possible. A value of 1 will effectively use 1 packet-per-page in most
-situations, giving a small seek granularity at the cost of additional container
-overhead.
-
-
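For example, a sketch that encodes a hypothetical input to Ogg/Vorbis
-(assuming libvorbis is enabled) while targeting pages of roughly half a
-second:
-
-
# INPUT is a placeholder; 500000 microseconds = 0.5 seconds per page
ffmpeg -i INPUT -c:a libvorbis -page_duration 500000 out.ogg
-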
-
-
-
4.17 segment, stream_segment, ssegment# TOC
-
-
Basic stream segmenter.
-
-
This muxer outputs streams to a number of separate files of nearly
-fixed duration. Output filename pattern can be set in a fashion similar to
-image2 .
-
-
stream_segment
is a variant of the muxer used to write to
-streaming output formats, i.e. which do not require global headers,
-and is recommended for outputting e.g. to MPEG transport stream segments.
-ssegment
is a shorter alias for stream_segment
.
-
-
Every segment starts with a keyframe of the selected reference stream,
-which is set through the reference_stream option.
-
-
Note that if you want accurate splitting for a video file, you need to
-make the input key frames correspond to the exact splitting times
-expected by the segmenter, or the segment muxer will start the new
-segment with the key frame found next after the specified start
-time.
-
-
The segment muxer works best with a single constant frame rate video.
-
-
Optionally it can generate a list of the created segments, by setting
-the option segment_list . The list type is specified by the
-segment_list_type option. The entry filenames in the segment
-list are set by default to the basename of the corresponding segment
-files.
-
-
See also the hls muxer, which provides a more specific
-implementation for HLS segmentation.
-
-
-
4.17.1 Options# TOC
-
-
The segment muxer supports the following options:
-
-
-reference_stream specifier
-Set the reference stream, as specified by the string specifier .
-If specifier is set to auto
, the reference is chosen
-automatically. Otherwise it must be a stream specifier (see the “Stream
-specifiers” chapter in the ffmpeg manual) which specifies the
-reference stream. The default value is auto
.
-
-
-segment_format format
-Override the inner container format, by default it is guessed by the filename
-extension.
-
-
-segment_format_options options_list
-Set output format options using a :-separated list of key=value
-parameters. Values containing the :
special character must be
-escaped.
-
-
-segment_list name
-Generate also a listfile named name . If not specified no
-listfile is generated.
-
-
-segment_list_flags flags
-Set flags affecting the segment list generation.
-
-It currently supports the following flags:
-
-‘cache ’
-Allow caching (only affects M3U8 list files).
-
-
-‘live ’
-Allow live-friendly file generation.
-
-
-
-
-segment_list_type type
-Select the listing format.
-
-flat use a simple flat list of entries.
-hls use a m3u8-like structure.
-
-
-
-segment_list_size size
-Update the list file so that it contains at most size
-segments. If 0 the list file will contain all the segments. Default
-value is 0.
-
-
-segment_list_entry_prefix prefix
-Prepend prefix to each entry. Useful to generate absolute paths.
-By default no prefix is applied.
-
-The following values are recognized:
-
-‘flat ’
-Generate a flat list for the created segments, one segment per line.
-
-
-‘csv, ext ’
-Generate a list for the created segments, one segment per line,
-each line matching the format (comma-separated values):
-
-
segment_filename ,segment_start_time ,segment_end_time
-
-
-segment_filename is the name of the output file generated by the
-muxer according to the provided pattern. CSV escaping (according to
-RFC4180) is applied if required.
-
-segment_start_time and segment_end_time specify
-the segment start and end time expressed in seconds.
-
-A list file with the suffix ".csv"
or ".ext"
will
-auto-select this format.
-
-‘ext ’ is deprecated in favor of ‘csv ’.
-
-
-‘ffconcat ’
-Generate an ffconcat file for the created segments. The resulting file
-can be read using the FFmpeg concat demuxer.
-
-A list file with the suffix ".ffcat"
or ".ffconcat"
will
-auto-select this format.
-
-
-‘m3u8 ’
-Generate an extended M3U8 file, version 3, compliant with
-http://tools.ietf.org/id/draft-pantos-http-live-streaming .
-
-A list file with the suffix ".m3u8"
will auto-select this format.
-
-
-
-If not specified the type is guessed from the list file name suffix.
-
-
-segment_time time
-Set segment duration to time , the value must be a duration
-specification. Default value is "2". See also the
-segment_times option.
-
-Note that splitting may not be accurate, unless you force the
-reference stream key-frames at the given time. See the introductory
-notice and the examples below.
-
-
-segment_atclocktime 1|0
-If set to "1" split at regular clock time intervals starting from 00:00
-o’clock. The time value specified in segment_time is
-used for setting the length of the splitting interval.
-
-For example with segment_time set to "900" this makes it possible
-to create files at 12:00 o’clock, 12:15, 12:30, etc.
-
-Default value is "0".
-
-
-segment_time_delta delta
-Specify the accuracy time when selecting the start time for a
-segment, expressed as a duration specification. Default value is "0".
-
-When delta is specified a key-frame will start a new segment if its
-PTS satisfies the relation:
-
-
PTS >= start_time - time_delta
-
-
-This option is useful when splitting video content, which is always
-split at GOP boundaries, in case a key frame is found just before the
-specified split time.
-
-In particular it may be used in combination with the ffmpeg option
-force_key_frames . The key frame times specified by
-force_key_frames may not be set accurately because of rounding
-issues, with the consequence that a key frame time may end up set just
-before the specified time. For constant frame rate videos a value of
-1/(2*frame_rate ) should address the worst case mismatch between
-the specified time and the time set by force_key_frames .
-
-
-segment_times times
-Specify a list of split points. times contains a list of comma
-separated duration specifications, in increasing order. See also
-the segment_time option.
-
-
-segment_frames frames
-Specify a list of split video frame numbers. frames contains a
-list of comma separated integer numbers, in increasing order.
-
-This option specifies to start a new segment whenever a reference
-stream key frame is found and the sequential number (starting from 0)
-of the frame is greater or equal to the next value in the list.
-
-
-segment_wrap limit
-Wrap around segment index once it reaches limit .
-
-
-segment_start_number number
-Set the sequence number of the first segment. Defaults to 0
.
-
-
-reset_timestamps 1|0
-Reset timestamps at the beginning of each segment, so that each segment
-will start with near-zero timestamps. It is meant to ease the playback
-of the generated segments. May not work with some combinations of
-muxers/codecs. It is set to 0
by default.
-
-
-initial_offset offset
-Specify timestamp offset to apply to the output packet timestamps. The
-argument must be a time duration specification, and defaults to 0.
-
-
-
-
-
4.17.2 Examples# TOC
-
-
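A minimal sketch, assuming a hypothetical in.mkv : remux the input into
-segments of roughly ten seconds each and generate a flat list file:
-
-
# in.mkv is a hypothetical input file
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_time 10 -segment_list out.list 'out%03d.nut'
-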
-
-
-
4.18 smoothstreaming# TOC
-
-
The Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with a conventional web server.
-
-
-window_size
-Specify the number of fragments kept in the manifest. Default 0 (keep all).
-
-
-extra_window_size
-Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
-
-
-lookahead_count
-Specify the number of lookahead fragments. Default 2.
-
-
-min_frag_duration
-Specify the minimum fragment duration (in microseconds). Default 5000000.
-
-
-remove_at_exit
-Specify whether to remove all fragments when finished. Default 0 (do not remove).
-
-
-
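For example, a sketch that encodes a hypothetical input and writes the
-Manifest and chunk files into the directory out_ss :
-
-
# INPUT and out_ss are placeholders
ffmpeg -re -i INPUT -c:v libx264 -c:a aac -strict experimental -f smoothstreaming out_ss
-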
-
-
-
4.19 tee# TOC
-
-
The tee muxer can be used to write the same data to several files or any
-other kind of muxer. It can be used, for example, to both stream a video to
-the network and save it to disk at the same time.
-
-
It is different from specifying several outputs to the ffmpeg
-command-line tool because the audio and video data will be encoded only once
-with the tee muxer; encoding can be a very expensive process. It is not
-useful when using the libavformat API directly because it is then possible
-to feed the same packets to several muxers directly.
-
-
The slave outputs are specified in the file name given to the muxer,
-separated by ’|’. If any of the slave names contains the ’|’ separator,
-leading or trailing spaces or any special character, it must be
-escaped (see (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual ).
-
-
Muxer options can be specified for each slave by prepending them as a list of
-key =value pairs separated by ’:’, between square brackets. If
-the options values contain a special character or the ’:’ separator, they
-must be escaped; note that this is a second level escaping.
-
-
The following special options are also recognized:
-
-f
-Specify the format name. Useful if it cannot be guessed from the
-output name suffix.
-
-
-bsfs[/spec ]
-Specify a list of bitstream filters to apply to the specified
-output.
-
-It is possible to specify to which streams a given bitstream filter
-applies, by appending a stream specifier to the option separated by
-/
. spec must be a stream specifier (see Format stream specifiers ). If the stream specifier is not specified, the
-bitstream filters will be applied to all streams in the output.
-
-Several bitstream filters can be specified, separated by ",".
-
-
-select
-Select the streams that should be mapped to the slave output,
-specified by a stream specifier. If not specified, this defaults to
-all the input streams.
-
-
-
-
-
4.19.1 Examples# TOC
-
-
- Encode something and both archive it in a WebM file and stream it
-as MPEG-TS over UDP (the streams need to be explicitly mapped):
-
-
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
- "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
-
-
- Use ffmpeg
to encode the input, and send the output
-to three different destinations. The dump_extra
bitstream
-filter is used to add extradata information to all the output video
-keyframes packets, as requested by the MPEG-TS format. The select
-option is applied to out.aac in order to make it contain only
-audio packets.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
-
-
- As above, but select only stream a:1
for the audio output. Note
-that a second level escaping must be performed, as ":" is a special
-character used to separate options.
-
-
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
- -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
-
-
-
-
Note: some codecs may need different options depending on the output format;
-the auto-detection of this cannot work with the tee muxer. The main example
-is the global_header flag.
-
-
-
4.20 webm_dash_manifest# TOC
-
-
WebM DASH Manifest muxer.
-
-
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
-
-
-
4.20.1 Options# TOC
-
-
This muxer supports the following options:
-
-
-adaptation_sets
-This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
-unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
-audio and video streams. Any number of adaptation sets can be added using this option.
-
-
-
-
-
4.20.2 Example# TOC
-
-
ffmpeg -f webm_dash_manifest -i video1.webm \
- -f webm_dash_manifest -i video2.webm \
- -f webm_dash_manifest -i audio1.webm \
- -f webm_dash_manifest -i audio2.webm \
- -map 0 -map 1 -map 2 -map 3 \
- -c copy \
- -f webm_dash_manifest \
- -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
- manifest.xml
-
-
-
-
5 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
A ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
-
6 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavformat
-
-
-
-
7 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-protocols.html b/Externals/ffmpeg/shared/doc/ffmpeg-protocols.html
deleted file mode 100644
index 0fd895cce0..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-protocols.html
+++ /dev/null
@@ -1,1545 +0,0 @@
-
-
-
-
-
-
- FFmpeg Protocols Documentation
-
-
-
-
-
-
-
-
- FFmpeg Protocols Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes the input and output protocols provided by the
-libavformat library.
-
-
-
-
2 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "--list-protocols".
-
-
You can disable all the protocols using the configure option
-"--disable-protocols", and selectively enable a protocol using the
-option "--enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"--disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
2.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
2.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to a temporary file. It brings seeking capability to live streams.
-
-
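For example, a sketch that wraps a hypothetical live HTTP stream with the
-cache protocol so that ffplay can seek backwards in it:
-
-
# the URL is hypothetical
ffplay cache:http://example.com/live/stream.m3u8
-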
-
-
-
2.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
Allows reading and seeking from many resources in sequence as if they were
-a unique resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
where URL1 , URL2 , ..., URLN are the URLs of the
-resources to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
2.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
-
-
2.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
2.6 file# TOC
-
-
File access protocol.
-
-
Allows reading from or writing to a file.
-
-
A file URL can have the form:
-
file:filename
-
where filename is the path of the file to read.
-
-
A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow medium.
-
-
-
-
-
2.7 ftp# TOC
-
FTP (File Transfer Protocol).
-
-
Allows reading from or writing to remote resources using the FTP protocol.
-
-
The following syntax is required.
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
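For example, a sketch that reads a remote file over FTP (the server name and
-credentials are hypothetical):
-
-
# server and credentials are placeholders
ffmpeg -i ftp://user:password@ftp.example.com/pub/input.mpeg output.mpg
-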
-
-
2.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
2.9 hls# TOC
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
2.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
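For example, a sketch that overrides the User-Agent header while reading
-from a hypothetical HTTP server:
-
-
# the URL is hypothetical
ffmpeg -user_agent "MyPlayer/1.0" -i http://example.com/input.mp4 -c copy out.mp4
-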
-
-
2.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
2.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, that do not support the
-HTTP PUT method but the SOURCE method.
-
-
-
-
-
-
icecast://[username [:password ]@]server :port /mountpoint
-
-
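For example, a sketch that streams an existing MP3 file to a hypothetical
-Icecast mountpoint:
-
-
# server, port, password and mountpoint are placeholders
ffmpeg -re -i input.mp3 -c copy -f mp3 icecast://source:hackme@icecast.example.com:8000/stream
-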
-
-
2.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
2.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server [:port ][/app ][/playpath ]
-
-
-
-
2.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
2.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
pipe:[number ]
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example, to read from stdin with ffmpeg:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV), require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
2.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any
, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live
and
-recorded
.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example, to read with ffplay a multimedia resource named
-"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
2.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
2.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
2.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
2.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
2.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
2.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
-The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set the timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
-
For more information see: http://www.samba.org/ .
-
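For example, a file on an SMB share could be played like this (the credentials, server, share and path are placeholders):

ffplay smb://user:password@server/share/path/to/file.mkv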
-
-
2.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
-Allows reading from or writing to remote resources using the SFTP protocol.
-
-
-The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
2.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"–enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto ://server [:port ][/app ][/playpath ] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
-For example, to stream a file in real-time to an RTMP server using
-ffmpeg:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
2.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname [:port ][?option =val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n '
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
-
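As a sketch (the destination host, port and codec below are placeholders; -an drops audio because the rtp muxer carries a single stream), a video stream can be sent over RTP while restricting the packet size with the pkt_size URL option described above:

ffmpeg -re -i input -an -c:v libx264 -f rtp "rtp://remote.example.com:5004?pkt_size=1400"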
-
2.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat, it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
-The required syntax for an RTSP URL is:
-
-
rtsp://hostname [:port ]/path
-
-
-
Options can be set on the ffmpeg/ffplay command
-line, or set in code via AVOptions or in avformat_open_input.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay, the
-streams to display can be chosen with -vst n and -ast n
-for video and audio respectively, and can be switched on the fly by
-pressing v and a.
-
-
-
2.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay and ffmpeg tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
2.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat, it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
2.27.1 Muxer# TOC
-
-
The syntax for a SAP url given to the muxer is:
-
-
sap://destination [:port ][?options ]
-
-
-
The RTP packets are sent to destination on port port ,
-or to port 5004 if no port is specified.
-options is a &
-separated list. The following options
-are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
2.27.2 Demuxer# TOC
-
-
The syntax for a SAP url given to the demuxer is:
-
-
sap://[address ][:port ]
-
-
-
address is the multicast address to listen for announcements on,
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port that is listened on, 9875 if omitted.
-
-
-The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
ffplay sap://
-
To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
2.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host :port [?options ]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. Outgoing connection is done by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
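As a rough sketch, assuming the build and operating system support SCTP (the host, port and container format below are placeholders), one side can listen while the other connects, much like with TCP:

ffmpeg -i "sctp://localhost:7000?listen" -c copy received.ts
ffmpeg -re -i input.ts -f mpegts "sctp://localhost:7000"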
-
-
2.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
-
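For example (a sketch only; the key material and destination are placeholders, and the rtp_mpegts muxer is just one possible payload format), an SRTP stream could be sent with:

ffmpeg -re -i input -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params <base64-encoded key and salt> "srtp://destination.example.com:5004"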
-
-
2.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
2.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
-The required syntax for a TCP URL is:
-
-
tcp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to set up a listening TCP connection
-with ffmpeg, which is then accessed with ffplay:
-
-
ffmpeg -i input -f format tcp://hostname :port ?listen
-ffplay tcp://hostname :port
-
-
-
-
2.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
-The required syntax for a TLS/SSL URL is:
-
-
tls://hostname :port [?options ]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay:
-
-
-
ffplay tls://hostname :port
-
-
-
-
2.33 udp# TOC
-
-
User Datagram Protocol.
-
-
-The required syntax for a UDP URL is:
-
-
udp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
2.33.1 Examples# TOC
-
-
- Use ffmpeg to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname :port
-
-
- Use ffmpeg to stream in mpegts format over UDP using 188-byte
-UDP packets, using a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address ]:port ...
-
-
-
-
-
2.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
-
unix://filepath
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
-
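A minimal usage sketch (the socket path and container format are placeholders, and nc is assumed to be available): create a listening socket with a separate tool, then write to it with ffmpeg:

nc -l -U /tmp/unix.sock
ffmpeg -i input -f mpegts unix:/tmp/unix.sock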
-
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavformat
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-resampler.html b/Externals/ffmpeg/shared/doc/ffmpeg-resampler.html
deleted file mode 100644
index 2611dfc1f0..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-resampler.html
+++ /dev/null
@@ -1,357 +0,0 @@
-
-
-
-
-
-
- FFmpeg Resampler Documentation
-
-
-
-
-
-
-
-
- FFmpeg Resampler Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The FFmpeg resampler provides a high-level interface to the
-libswresample library audio resampling utilities. In particular it
-allows one to perform audio resampling, audio channel layout rematrixing,
-and audio format and packing layout conversion.
-
-
-
-
2 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volumn reduction
-A value of 1.0 prevents cliping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
-
-
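As an illustration (a sketch only; the filenames are placeholders), the named options above can be passed to the aresample filter, for example to resample to 44.1 kHz with high-pass triangular dithering:

ffmpeg -i input.wav -af aresample=out_sample_rate=44100:dither_method=triangular_hp output.wav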
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libswresample
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-scaler.html b/Externals/ffmpeg/shared/doc/ffmpeg-scaler.html
deleted file mode 100644
index b7e57e3891..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-scaler.html
+++ /dev/null
@@ -1,231 +0,0 @@
-
-
-
-
-
-
- FFmpeg Scaler Documentation
-
-
-
-
-
-
-
-
- FFmpeg Scaler Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The FFmpeg rescaler provides a high-level interface to the libswscale
-library image conversion utilities. In particular it allows one to perform
-image rescaling and pixel format conversion.
-
-
-
-
2 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific of
-some scaling algorithms and ignored by others. The specified values
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
-
-
-
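For example (a sketch; the filenames are placeholders), the scaling algorithm and rounding behaviour can be selected on the command line with the flags listed above:

ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd output.mp4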
-
-
3 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libswscale
-
-
-
-
4 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg-utils.html b/Externals/ffmpeg/shared/doc/ffmpeg-utils.html
deleted file mode 100644
index 127e624d9d..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg-utils.html
+++ /dev/null
@@ -1,1468 +0,0 @@
-
-
-
-
-
-
- FFmpeg Utilities Documentation
-
-
-
-
-
-
-
-
- FFmpeg Utilities Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
This document describes some generic features and utilities provided
-by the libavutil library.
-
-
-
-
2 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
2.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- '
and \
are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between '' are included literally in the
-parsed string. The quote character '
itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
2.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour
containing the '
special
-character:
Crime d\'Amour
-
- The string above contains a quote, so the '
needs to be escaped
-when quoting it:
'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
2.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
2.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
[-]HH:MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
-
[-]S[.m...]
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
2.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
-
-
2.4 Video size# TOC
-
Specify the size of the sourced video, it may be a string of the form
-width xheight , or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
-
-
2.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
-
-
-
2.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
2.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
-The alpha component may be a string composed of "0x" followed by a
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
-
-
2.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1 ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout
)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_*
macros in libavutil/channel_layout.h .
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
-
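For instance (a sketch; the filenames are placeholders), a layout written with the syntax above can be requested through the aformat filter:

ffmpeg -i input.wav -af "aformat=channel_layouts=FL+FR+LFE" output.wav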
-
3 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +
, -
,
-*
, /
, ^
.
-
-
The following unary operators are available: +
, -
.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and lesser than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is lesser than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is lesser than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated to y ; it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note that variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
For example the construct:
-
if (A AND B) then C
is equivalent to:
-
if(A*B, C)
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
-
-
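To see the evaluator in action (a sketch; the lavfi device and the aevalsrc source are assumed to be available in the build), an expression using the constants and functions above can generate a five second 440 Hz test tone:

ffplay -f lavfi "aevalsrc=sin(2*PI*440*t):d=5"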
-
-
4 OpenCL Options# TOC
-
-
When FFmpeg is configured with --enable-opencl
, it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list().
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list().
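-
-For example, assuming an OpenCL-enabled build, these options can be passed
-to the ffmpeg tool through its -opencl_options option to select the first
-platform and device (the file names are hypothetical):
-
ffmpeg -opencl_options platform_idx=0:device_idx=0 -i input.mp4 output.mp4
-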
-
-
-
-
-
-
-
5 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-libavutil
-
-
-
-
6 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffmpeg.html b/Externals/ffmpeg/shared/doc/ffmpeg.html
deleted file mode 100644
index d7524f0917..0000000000
--- a/Externals/ffmpeg/shared/doc/ffmpeg.html
+++ /dev/null
@@ -1,2109 +0,0 @@
-
-
-
-
-
-
- ffmpeg Documentation
-
-
-
-
-
-
-
-
- ffmpeg Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffmpeg [global_options ] {[input_file_options ] -i input_file } ... {[output_file_options ] output_file } ...
-
-
-
2 Description# TOC
-
-
ffmpeg is a very fast video and audio converter that can also grab from
-a live audio/video source. It can also convert between arbitrary sample
-rates and resize video on the fly with a high quality polyphase filter.
-
-
ffmpeg reads from an arbitrary number of input "files" (which can be regular
-files, pipes, network streams, grabbing devices, etc.), specified by the
--i option, and writes to an arbitrary number of output "files", which are
-specified by a plain output filename. Anything found on the command line which
-cannot be interpreted as an option is considered to be an output filename.
-
-
Each input or output file can, in principle, contain any number of streams of
-different types (video/audio/subtitle/attachment/data). The allowed number and/or
-types of streams may be limited by the container format. Selecting which
-streams from which inputs will go into which output is either done automatically
-or with the -map
option (see the Stream selection chapter).
-
-
To refer to input files in options, you must use their indices (0-based). E.g.
-the first input file is 0, the second is 1, etc. Similarly, streams
-within a file are referred to by their indices. E.g. 2:3 refers to the
-fourth stream in the third input file. Also see the Stream specifiers chapter.
-
-
As a general rule, options are applied to the next specified
-file. Therefore, order is important, and you can have the same
-option on the command line multiple times. Each occurrence is
-then applied to the next input or output file.
-Exceptions from this rule are the global options (e.g. verbosity level),
-which should be specified first.
-
-
Do not mix input and output files – first specify all input files, then all
-output files. Also do not mix options which belong to different files. All
-options apply ONLY to the next input or output file and are reset between files.
-
-
- To set the video bitrate of the output file to 64 kbit/s:
-
-
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
-
-
- To force the frame rate of the output file to 24 fps:
-
-
ffmpeg -i input.avi -r 24 output.avi
-
-
- To force the frame rate of the input file (valid for raw formats only)
-to 1 fps and the frame rate of the output file to 24 fps:
-
-
ffmpeg -r 1 -i input.m2v -r 24 output.avi
-
-
-
-
The format option may be needed for raw input files.
-
-
-
-
3 Detailed description# TOC
-
-
The transcoding process in ffmpeg
for each output can be described by
-the following diagram:
-
-
-
_______ ______________
-| | | |
-| input | demuxer | encoded data | decoder
-| file | ---------> | packets | -----+
-|_______| |______________| |
- v
- _________
- | |
- | decoded |
- | frames |
- |_________|
- ________ ______________ |
-| | | | |
-| output | <-------- | encoded data | <----+
-| file | muxer | packets | encoder
-|________| |______________|
-
-
-
-
-
ffmpeg
calls the libavformat library (containing demuxers) to read
-input files and get packets containing encoded data from them. When there are
-multiple input files, ffmpeg
tries to keep them synchronized by
-tracking lowest timestamp on any active input stream.
-
-
Encoded packets are then passed to the decoder (unless streamcopy is selected
-for the stream, see further for a description). The decoder produces
-uncompressed frames (raw video/PCM audio/...) which can be processed further by
-filtering (see next section). After filtering, the frames are passed to the
-encoder, which encodes them and outputs encoded packets. Finally those are
-passed to the muxer, which writes the encoded packets to the output file.
-
-
-
3.1 Filtering# TOC
-
Before encoding, ffmpeg
can process raw audio and video frames using
-filters from the libavfilter library. Several chained filters form a filter
-graph. ffmpeg
distinguishes between two types of filtergraphs:
-simple and complex.
-
-
-
3.1.1 Simple filtergraphs# TOC
-
Simple filtergraphs are those that have exactly one input and output, both of
-the same type. In the above diagram they can be represented by simply inserting
-an additional step between decoding and encoding:
-
-
-
_________ ______________
-| | | |
-| decoded | | encoded data |
-| frames |\ _ | packets |
-|_________| \ /||______________|
- \ __________ /
- simple _\|| | / encoder
- filtergraph | filtered |/
- | frames |
- |__________|
-
-
-
-
Simple filtergraphs are configured with the per-stream -filter option
-(with -vf and -af aliases for video and audio respectively).
-A simple filtergraph for video can look for example like this:
-
-
-
_______ _____________ _______ ________
-| | | | | | | |
-| input | ---> | deinterlace | ---> | scale | ---> | output |
-|_______| |_____________| |_______| |________|
-
-
-
-
Note that some filters change frame properties but not frame contents. E.g. the
-fps filter changes the number of frames, but does not
-touch the frame contents. Another example is the setpts filter, which
-only sets timestamps and otherwise passes the frames unchanged.
-
-
-
3.1.2 Complex filtergraphs# TOC
-
Complex filtergraphs are those which cannot be described as simply a linear
-processing chain applied to one stream. This is the case, for example, when the graph has
-more than one input and/or output, or when output stream type is different from
-input. They can be represented with the following diagram:
-
-
-
_________
-| |
-| input 0 |\ __________
-|_________| \ | |
- \ _________ /| output 0 |
- \ | | / |__________|
- _________ \| complex | /
-| | | |/
-| input 1 |---->| filter |\
-|_________| | | \ __________
- /| graph | \ | |
- / | | \| output 1 |
- _________ / |_________| |__________|
-| | /
-| input 2 |/
-|_________|
-
-
-
-
Complex filtergraphs are configured with the -filter_complex option.
-Note that this option is global, since a complex filtergraph, by its nature,
-cannot be unambiguously associated with a single stream or file.
-
-
The -lavfi option is equivalent to -filter_complex .
-
-
A trivial example of a complex filtergraph is the overlay
filter, which
-has two video inputs and one video output, containing one video overlaid on top
-of the other. Its audio counterpart is the amix
filter.
-
-
-
3.2 Stream copy# TOC
-
Stream copy is a mode selected by supplying the copy
parameter to the
--codec option. It makes ffmpeg
omit the decoding and encoding
-step for the specified stream, so it does only demuxing and muxing. It is useful
-for changing the container format or modifying container-level metadata. The
-diagram above will, in this case, simplify to this:
-
-
-
_______ ______________ ________
-| | | | | |
-| input | demuxer | encoded data | muxer | output |
-| file | ---------> | packets | -------> | file |
-|_______| |______________| |________|
-
-
-
-
Since there is no decoding or encoding, it is very fast and there is no quality
-loss. However, it might not work in some cases because of many factors. Applying
-filters is obviously also impossible, since filters work on uncompressed data.
-
-
-
-
4 Stream selection# TOC
-
-
By default, ffmpeg
includes only one stream of each type (video, audio, subtitle)
-present in the input files and adds them to each output file. It picks the
-"best" of each based upon the following criteria: for video, it is the stream
-with the highest resolution, for audio, it is the stream with the most channels, for
-subtitles, it is the first subtitle stream. In the case where several streams of
-the same type rate equally, the stream with the lowest index is chosen.
-
-
You can disable some of those defaults by using the -vn/-an/-sn
options. For
-full manual control, use the -map
option, which disables the defaults just
-described.
-
-
-
-
5 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
5.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
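-
-As a combined illustration of the forms above (the file names are
-hypothetical), the following copies every stream but re-encodes only the
-second audio stream:
-
ffmpeg -i input.mkv -map 0 -c copy -c:a:1 libmp3lame -b:a:1 192k output.mkv
-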
-
-
-
-
-
5.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assertion failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr. If coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled by setting the environment variable
-AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced by setting
-the environment variable AV_LOG_FORCE_COLOR.
-The use of the environment variable NO_COLOR is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program-YYYYMMDD-HHMMSS.log in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p is expanded to the name
-of the program, %t is expanded to a timestamp, %% is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
5.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
5.4 Main options# TOC
-
-
--f fmt (input/output )
-Force input or output file format. The format is normally auto detected for input
-files and guessed from the file extension for output files, so this option is not
-needed in most cases.
-
-
--i filename (input )
-input file name
-
-
--y (global )
-Overwrite output files without asking.
-
-
--n (global )
-Do not overwrite output files, and exit immediately if a specified
-output file already exists.
-
-
--c[:stream_specifier ] codec (input/output,per-stream )
--codec[:stream_specifier ] codec (input/output,per-stream )
-Select an encoder (when used before an output file) or a decoder (when used
-before an input file) for one or more streams. codec is the name of a
-decoder/encoder or a special value copy
(output only) to indicate that
-the stream is not to be re-encoded.
-
-For example
-
-
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
-
-encodes all video streams with libx264 and copies all audio streams.
-
-For each stream, the last matching c
option is applied, so
-
-
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
-
-will copy all the streams except the second video, which will be encoded with
-libx264, and the 138th audio, which will be encoded with libvorbis.
-
-
--t duration (input/output )
-When used as an input option (before -i
), limit the duration of
-data read from the input file.
-
-When used as an output option (before an output filename), stop writing the
-output after its duration reaches duration .
-
-duration may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--to position (output )
-Stop writing the output at position .
-position may be a number in seconds, or in hh:mm:ss[.xxx]
form.
-
--to and -t are mutually exclusive and -t has priority.
-
-
--fs limit_size (output )
-Set the file size limit, expressed in bytes.
-
-
--ss position (input/output )
-When used as an input option (before -i
), seeks in this input file to
-position . Note that in most formats it is not possible to seek exactly, so
-ffmpeg
will seek to the closest seek point before position .
-When transcoding and -accurate_seek is enabled (the default), this
-extra segment between the seek point and position will be decoded and
-discarded. When doing stream copy or when -noaccurate_seek is used, it
-will be preserved.
-
-When used as an output option (before an output filename), decodes but discards
-input until the timestamps reach position .
-
-position may be either in seconds or in hh:mm:ss[.xxx]
form.
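-
-For example, a sketch combining input seeking with a duration limit (the
-file names are hypothetical): seek to one minute into the input and keep 30
-seconds of output:
-
ffmpeg -ss 00:01:00 -i input.mp4 -t 30 -c:v libx264 -c:a copy clip.mp4
-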
-
-
--itsoffset offset (input )
-Set the input time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added to the timestamps of the input files. Specifying
-a positive offset means that the corresponding streams are delayed by
-the time duration specified in offset .
-
-
--timestamp date (output )
-Set the recording timestamp in the container.
-
-date must be a time duration specification,
-see (ffmpeg-utils)the Date section in the ffmpeg-utils(1) manual .
-
-
--metadata[:metadata_specifier] key =value (output,per-metadata )
-Set a metadata key/value pair.
-
-An optional metadata_specifier may be given to set metadata
-on streams or chapters. See -map_metadata
documentation for
-details.
-
-This option overrides metadata set with -map_metadata
. It is
-also possible to delete metadata by using an empty value.
-
-For example, for setting the title in the output file:
-
-
ffmpeg -i in.avi -metadata title="my title" out.flv
-
-
-To set the language of the first audio stream:
-
-
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
-
-
-
--target type (output )
-Specify target file type (vcd
, svcd
, dvd
, dv
,
-dv50
). type may be prefixed with pal-
, ntsc-
or
-film-
to use the corresponding standard. All the format options
-(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
-
-
-
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
-
-
-Nevertheless you can specify additional options as long as you know
-they do not conflict with the standard, as in:
-
-
-
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
-
-
-
--dframes number (output )
-Set the number of data frames to output. This is an alias for -frames:d
.
-
-
--frames[:stream_specifier ] framecount (output,per-stream )
-Stop writing to the stream after framecount frames.
-
-
--q[:stream_specifier ] q (output,per-stream )
--qscale[:stream_specifier ] q (output,per-stream )
-Use fixed quality scale (VBR). The meaning of q /qscale is
-codec-dependent.
-If qscale is used without a stream_specifier then it applies only
-to the video stream, this is to maintain compatibility with previous behavior
-and as specifying the same codec specific value to 2 different codecs that is
-audio and video generally is not what is intended when no stream_specifier is
-used.
-
-
--filter[:stream_specifier ] filtergraph (output,per-stream )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single input and a single output of the
-same type of the stream. In the filtergraph, the input is associated
-to the label in
, and the output to the label out
. See
-the ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-See the -filter_complex option if you
-want to create filtergraphs with multiple inputs and/or outputs.
-
-
--filter_script[:stream_specifier ] filename (output,per-stream )
-This option is similar to -filter , the only difference is that its
-argument is the name of the file from which a filtergraph description is to be
-read.
-
-
--pre[:stream_specifier ] preset_name (output,per-stream )
-Specify the preset for matching stream(s).
-
-
--stats (global )
-Print encoding progress/statistics. It is on by default; to explicitly
-disable it you need to specify -nostats.
-
-
--progress url (global )
-Send program-friendly progress information to url .
-
-Progress information is written approximately every second and at the end of
-the encoding process. It is made of "key =value " lines. key
-consists of only alphanumeric characters. The last key of a sequence of
-progress information is always "progress".
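-
-For example, a sketch that writes these key=value lines to a local file
-(the file name is hypothetical):
-
ffmpeg -i input.mp4 -progress progress.txt output.mkv
-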
-
-
--stdin
-Enable interaction on standard input. On by default unless standard input is
-used as an input. To explicitly disable interaction you need to specify
--nostdin.
-
-Disabling interaction on standard input is useful, for example, if
-ffmpeg is in the background process group. Roughly the same result can
-be achieved with ffmpeg ... < /dev/null
but it requires a
-shell.
-
-
--debug_ts (global )
-Print timestamp information. It is off by default. This option is
-mostly useful for testing and debugging purposes, and the output
-format may change from one version to another, so it should not be
-employed by portable scripts.
-
-See also the option -fdebug ts.
-
-
--attach filename (output )
-Add an attachment to the output file. This is supported by a few formats
-like Matroska for e.g. fonts used in rendering subtitles. Attachments
-are implemented as a specific type of stream, so this option will add
-a new stream to the file. It is then possible to use per-stream options
-on this stream in the usual way. Attachment streams created with this
-option will be created after all the other streams (i.e. those created
-with -map
or automatic mappings).
-
-Note that for Matroska you also have to set the mimetype metadata tag:
-
-
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
-
-(assuming that the attachment stream will be third in the output file).
-
-
--dump_attachment[:stream_specifier ] filename (input,per-stream )
-Extract the matching attachment stream into a file named filename . If
-filename is empty, then the value of the filename
metadata tag
-will be used.
-
-E.g. to extract the first attachment to a file named ’out.ttf’:
-
-
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
-
-To extract all attachments to files determined by the filename
tag:
-
-
ffmpeg -dump_attachment:t "" -i INPUT
-
-
-Technical note – attachments are implemented as codec extradata, so this
-option can actually be used to extract extradata from any stream, not just
-attachments.
-
-
-
-
-
-
5.5 Video Options# TOC
-
-
--vframes number (output )
-Set the number of video frames to output. This is an alias for -frames:v
.
-
--r[:stream_specifier ] fps (input/output,per-stream )
-Set frame rate (Hz value, fraction or abbreviation).
-
-As an input option, ignore any timestamps stored in the file and instead
-generate timestamps assuming constant frame rate fps .
-This is not the same as the -framerate option used for some input formats
-like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
-If in doubt use -framerate instead of the input option -r .
-
-As an output option, duplicate or drop input frames to achieve constant output
-frame rate fps .
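-
-For example, a sketch that reads an image sequence at 10 fps and produces a
-25 fps output (the pattern and file names are hypothetical):
-
ffmpeg -framerate 10 -i img%03d.png -r 25 output.mp4
-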
-
-
--s[:stream_specifier ] size (input/output,per-stream )
-Set frame size.
-
-As an input option, this is a shortcut for the video_size private
-option, recognized by some demuxers for which the frame size is either not
-stored in the file or is configurable – e.g. raw video or video grabbers.
-
-As an output option, this inserts the scale
video filter to the
-end of the corresponding filtergraph. Please use the scale
filter
-directly to insert it at the beginning or some other place.
-
-The format is ‘wxh ’ (default - same as source).
-
-
--aspect[:stream_specifier ] aspect (output,per-stream )
-Set the video display aspect ratio specified by aspect .
-
-aspect can be a floating point number string, or a string of the
-form num :den , where num and den are the
-numerator and denominator of the aspect ratio. For example "4:3",
-"16:9", "1.3333", and "1.7777" are valid argument values.
-
-If used together with -vcodec copy , it will affect the aspect ratio
-stored at container level, but not the aspect ratio stored in encoded
-frames, if it exists.
-
-
--vn (output )
-Disable video recording.
-
-
--vcodec codec (output )
-Set the video codec. This is an alias for -codec:v
.
-
-
--pass[:stream_specifier ] n (output,per-stream )
-Select the pass number (1 or 2). It is used to do two-pass
-video encoding. The statistics of the video are recorded in the first
-pass into a log file (see also the option -passlogfile),
-and in the second pass that log file is used to generate the video
-at the exact requested bitrate.
-On pass 1, you may just deactivate audio and set output to null,
-examples for Windows and Unix:
-
-
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
-ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
-
-
-
--passlogfile[:stream_specifier ] prefix (output,per-stream )
-Set two-pass log file name prefix to prefix , the default file name
-prefix is “ffmpeg2pass”. The complete file name will be
-PREFIX-N.log , where N is a number specific to the output
-stream
-
-
--vf filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:v
, see the -filter option .
-
-
-
-
-
5.6 Advanced Video options# TOC
-
-
--pix_fmt[:stream_specifier ] format (input/output,per-stream )
-Set pixel format. Use -pix_fmts
to show all the supported
-pixel formats.
-If the selected pixel format can not be selected, ffmpeg will print a
-warning and select the best pixel format supported by the encoder.
-If pix_fmt is prefixed by a +
, ffmpeg will exit with an error
-if the requested pixel format can not be selected, and automatic conversions
-inside filtergraphs are disabled.
-If pix_fmt is a single +
, ffmpeg selects the same pixel format
-as the input (or graph output) and automatic conversions are disabled.
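-
-For example, a sketch forcing a widely supported pixel format on the video
-stream (the file names are hypothetical):
-
ffmpeg -i input.mov -c:v libx264 -pix_fmt yuv420p output.mp4
-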
-
-
--sws_flags flags (input/output )
-Set SwScaler flags.
-
--vdt n
-Discard threshold.
-
-
--rc_override[:stream_specifier ] override (output,per-stream )
-Rate control override for specific intervals, formatted as "int,int,int"
-list separated with slashes. Two first values are the beginning and
-end frame numbers, last one is quantizer to use if positive, or quality
-factor if negative.
-
-
--ilme
-Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
-Use this option if your input file is interlaced and you want
-to keep the interlaced format for minimum losses.
-The alternative is to deinterlace the input stream with
--deinterlace , but deinterlacing introduces losses.
-
--psnr
-Calculate PSNR of compressed frames.
-
--vstats
-Dump video coding statistics to vstats_HHMMSS.log .
-
--vstats_file file
-Dump video coding statistics to file .
-
--top[:stream_specifier ] n (output,per-stream )
-top=1/bottom=0/auto=-1 field first
-
--dc precision
-Intra_dc_precision.
-
--vtag fourcc/tag (output )
-Force video tag/fourcc. This is an alias for -tag:v
.
-
--qphist (global )
-Show QP histogram
-
--vbsf bitstream_filter
-Deprecated see -bsf
-
-
--force_key_frames[:stream_specifier ] time [,time ...] (output,per-stream )
--force_key_frames[:stream_specifier ] expr:expr (output,per-stream )
-Force key frames at the specified timestamps, more precisely at the first
-frames after each specified time.
-
-If the argument is prefixed with expr:
, the string expr
-is interpreted like an expression and is evaluated for each frame. A
-key frame is forced in case the evaluation is non-zero.
-
-If one of the times is "chapters
[delta ]", it is expanded into
-the time of the beginning of all chapters in the file, shifted by
-delta , expressed as a time in seconds.
-This option can be useful to ensure that a seek point is present at a
-chapter mark or any other designated place in the output file.
-
-For example, to insert a key frame at 5 minutes, plus key frames 0.1 second
-before the beginning of every chapter:
-
-
-force_key_frames 0:05:00,chapters-0.1
-
-
-The expression in expr can contain the following constants:
-
-n
-the number of current processed frame, starting from 0
-
-n_forced
-the number of forced frames
-
-prev_forced_n
-the number of the previous forced frame, it is NAN
when no
-keyframe was forced yet
-
-prev_forced_t
-the time of the previous forced frame, it is NAN
when no
-keyframe was forced yet
-
-t
-the time of the current processed frame
-
-
-
-For example to force a key frame every 5 seconds, you can specify:
-
-
-force_key_frames expr:gte(t,n_forced*5)
-
-
-To force a key frame 5 seconds after the time of the last forced one,
-starting from second 13:
-
-
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
-
-
-Note that forcing too many keyframes is very harmful for the lookahead
-algorithms of certain encoders: using fixed-GOP options or similar
-would be more efficient.
-
-
--copyinkf[:stream_specifier ] (output,per-stream )
-When doing stream copy, copy also non-key frames found at the
-beginning.
-
-
--hwaccel[:stream_specifier ] hwaccel (input,per-stream )
-Use hardware acceleration to decode the matching stream(s). The allowed values
-of hwaccel are:
-
-none
-Do not use any hardware acceleration (the default).
-
-
-auto
-Automatically select the hardware acceleration method.
-
-
-vda
-Use Apple VDA hardware acceleration.
-
-
-vdpau
-Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
-
-
-dxva2
-Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
-
-
-
-This option has no effect if the selected hwaccel is not available or not
-supported by the chosen decoder.
-
-Note that most acceleration methods are intended for playback and will not be
-faster than software decoding on modern CPUs. Additionally, ffmpeg
-will usually need to copy the decoded frames from the GPU memory into the system
-memory, resulting in further performance loss. This option is thus mainly
-useful for testing.
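-
-For example, a sketch that decodes with VDPAU (assuming a build and driver
-that support it; the file names are hypothetical) and re-encodes in
-software:
-
ffmpeg -hwaccel vdpau -i input.mkv -c:v libx264 output.mkv
-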
-
-
--hwaccel_device[:stream_specifier ] hwaccel_device (input,per-stream )
-Select a device to use for hardware acceleration.
-
-This option only makes sense when the -hwaccel option is also
-specified. Its exact meaning depends on the specific hardware acceleration
-method chosen.
-
-
-vdpau
-For VDPAU, this option specifies the X11 display/screen to use. If this option
-is not specified, the value of the DISPLAY environment variable is used
-
-
-dxva2
-For DXVA2, this option should contain the number of the display adapter to use.
-If this option is not specified, the default adapter is used.
-
-
-
-
-
-
-
5.7 Audio Options# TOC
-
-
--aframes number (output )
-Set the number of audio frames to output. This is an alias for -frames:a
.
-
--ar[:stream_specifier ] freq (input/output,per-stream )
-Set the audio sampling frequency. For output streams it is set by
-default to the frequency of the corresponding input stream. For input
-streams this option only makes sense for audio grabbing devices and raw
-demuxers and is mapped to the corresponding demuxer options.
-
--aq q (output )
-Set the audio quality (codec-specific, VBR). This is an alias for -q:a.
-
--ac[:stream_specifier ] channels (input/output,per-stream )
-Set the number of audio channels. For output streams it is set by
-default to the number of input audio channels. For input streams
-this option only makes sense for audio grabbing devices and raw demuxers
-and is mapped to the corresponding demuxer options.
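-
-For example, a sketch that resamples to 44100 Hz and downmixes to two
-channels (the file names are hypothetical):
-
ffmpeg -i input.wav -ar 44100 -ac 2 output.mp3
-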
-
--an (output )
-Disable audio recording.
-
--acodec codec (input/output )
-Set the audio codec. This is an alias for -codec:a
.
-
--sample_fmt[:stream_specifier ] sample_fmt (output,per-stream )
-Set the audio sample format. Use -sample_fmts
to get a list
-of supported sample formats.
-
-
--af filtergraph (output )
-Create the filtergraph specified by filtergraph and use it to
-filter the stream.
-
-This is an alias for -filter:a
, see the -filter option .
-
-
-
-
-
5.8 Advanced Audio options# TOC
-
-
--atag fourcc/tag (output )
-Force audio tag/fourcc. This is an alias for -tag:a
.
-
--absf bitstream_filter
-Deprecated, see -bsf
-
--guess_layout_max channels (input,per-stream )
-If some input channel layout is not known, try to guess only if it
-corresponds to at most the specified number of channels. For example, 2
-tells ffmpeg to recognize 1 channel as mono and 2 channels as
-stereo but not 6 channels as 5.1. The default is to always try to guess. Use
-0 to disable all guessing.
-
-
-
-
-
5.9 Subtitle options# TOC
-
-
--scodec codec (input/output )
-Set the subtitle codec. This is an alias for -codec:s
.
-
--sn (output )
-Disable subtitle recording.
-
--sbsf bitstream_filter
-Deprecated, see -bsf
-
-
-
-
-
5.10 Advanced Subtitle options# TOC
-
-
--fix_sub_duration
-Fix subtitles durations. For each subtitle, wait for the next packet in the
-same stream and adjust the duration of the first to avoid overlap. This is
-necessary with some subtitles codecs, especially DVB subtitles, because the
-duration in the original packet is only a rough estimate and the end is
-actually marked by an empty subtitle frame. Failing to use this option when
-necessary can result in exaggerated durations or muxing failures due to
-non-monotonic timestamps.
-
-Note that this option will delay the output of all data until the next
-subtitle packet is decoded: it may increase memory consumption and latency a
-lot.
-
-
--canvas_size size
-Set the size of the canvas used to render subtitles.
-
-
-
-
-
-
5.11 Advanced options# TOC
-
-
--map [-]input_file_id [:stream_specifier ][,sync_file_id [:stream_specifier ]] | [linklabel] (output )
-
-Designate one or more input streams as a source for the output file. Each input
-stream is identified by the input file index input_file_id and
-the input stream index input_stream_id within the input
-file. Both indices start at 0. If specified,
-sync_file_id :stream_specifier sets which input stream
-is used as a presentation sync reference.
-
-The first -map
option on the command line specifies the
-source for output stream 0, the second -map
option specifies
-the source for output stream 1, etc.
-
-A -
character before the stream identifier creates a "negative" mapping.
-It disables matching streams from already created mappings.
-
-An alternative [linklabel] form will map outputs from complex filter
-graphs (see the -filter_complex option) to the output file.
-linklabel must correspond to a defined output link label in the graph.
-
-For example, to map ALL streams from the first input file to output
-
-
ffmpeg -i INPUT -map 0 output
-
-
-For example, if you have two audio streams in the first input file,
-these streams are identified by "0:0" and "0:1". You can use
--map
to select which streams to place in an output file. For
-example:
-
-
ffmpeg -i INPUT -map 0:1 out.wav
-
-will map the input stream in INPUT identified by "0:1" to
-the (single) output stream in out.wav .
-
-For example, to select the stream with index 2 from input file
-a.mov (specified by the identifier "0:2"), and stream with
-index 6 from input b.mov (specified by the identifier "1:6"),
-and copy them to the output file out.mov :
-
-
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
-
-
-To select all video and the third audio stream from an input file:
-
-
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
-
-
-To map all the streams except the second audio, use negative mappings
-
-
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
-
-
-To pick the English audio stream:
-
-
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
-
-
-Note that using this option disables the default mappings for this output file.
-
-
--map_channel [input_file_id .stream_specifier .channel_id |-1][:output_file_id .stream_specifier ]
-Map an audio channel from a given input to an output. If
-output_file_id .stream_specifier is not set, the audio channel will
-be mapped on all the audio streams.
-
-Using "-1" instead of
-input_file_id .stream_specifier .channel_id will map a muted
-channel.
-
-For example, assuming INPUT is a stereo audio file, you can switch the
-two audio channels with the following command:
-
-
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
-
-
-If you want to mute the first channel and keep the second:
-
-
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
-
-
-The order of the "-map_channel" option specifies the order of the channels in
-the output stream. The output channel layout is guessed from the number of
-channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
-in combination of "-map_channel" makes the channel gain levels to be updated if
-input and output channel layouts don’t match (for instance two "-map_channel"
-options and "-ac 6").
-
-You can also extract each channel of an input to specific outputs; the following
-command extracts two channels of the INPUT audio stream (file 0, stream 0)
-to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs:
-
-
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
-
-
-The following example splits the channels of a stereo input into two separate
-streams, which are put into the same output file:
-
-
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
-
-
-Note that currently each output stream can only contain channels from a single
-input stream; you can’t for example use "-map_channel" to pick multiple input
-audio channels contained in different streams (from the same or different files)
-and merge them into a single output stream. It is therefore not currently
-possible, for example, to turn two separate mono streams into a single stereo
-stream. However splitting a stereo stream into two single channel mono streams
-is possible.
-
-If you need this feature, a possible workaround is to use the amerge
-filter. For example, if you need to merge a media (here input.mkv ) with 2
-mono audio streams into one single stereo channel audio stream (and keep the
-video stream), you can use the following command:
-
-
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
-
-
-
--map_metadata[:metadata_spec_out ] infile [:metadata_spec_in ] (output,per-metadata )
-Set metadata information of the next output file from infile . Note that
-those are file indices (zero-based), not filenames.
-Optional metadata_spec_in/out parameters specify, which metadata to copy.
-A metadata specifier can have the following forms:
-
-g
-global metadata, i.e. metadata that applies to the whole file
-
-
-s [:stream_spec ]
-per-stream metadata. stream_spec is a stream specifier as described
-in the Stream specifiers chapter. In an input metadata specifier, the first
-matching stream is copied from. In an output metadata specifier, all matching
-streams are copied to.
-
-
-c :chapter_index
-per-chapter metadata. chapter_index is the zero-based chapter index.
-
-
-p :program_index
-per-program metadata. program_index is the zero-based program index.
-
-
-If metadata specifier is omitted, it defaults to global.
-
-By default, global metadata is copied from the first input file,
-per-stream and per-chapter metadata is copied along with streams/chapters. These
-default mappings are disabled by creating any mapping of the relevant type. A negative
-file index can be used to create a dummy mapping that just disables automatic copying.
-
-For example to copy metadata from the first stream of the input file to global metadata
-of the output file:
-
-
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
-
-
-To do the reverse, i.e. copy global metadata to all audio streams:
-
-
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
-
-Note that simple 0
would work as well in this example, since global
-metadata is assumed by default.
-
-
--map_chapters input_file_index (output )
-Copy chapters from input file with index input_file_index to the next
-output file. If no chapter mapping is specified, then chapters are copied from
-the first input file with at least one chapter. Use a negative file index to
-disable any chapter copying.
-
-
--benchmark (global )
-Show benchmarking information at the end of an encode.
-Shows CPU time used and maximum memory consumption.
-Maximum memory consumption is not supported on all systems,
-it will usually display as 0 if not supported.
-
--benchmark_all (global )
-Show benchmarking information during the encode.
-Shows CPU time used in various steps (audio/video encode/decode).
-
--timelimit duration (global )
-Exit after ffmpeg has been running for duration seconds.
-
--dump (global )
-Dump each input packet to stderr.
-
--hex (global )
-When dumping packets, also dump the payload.
-
--re (input )
-Read input at native frame rate. Mainly used to simulate a grab device
-or live input stream (e.g. when reading from a file). Should not be used
-with actual grab devices or live input streams (where it can cause packet
-loss).
-By default ffmpeg
attempts to read the input(s) as fast as possible.
-This option will slow down the reading of the input(s) to the native frame rate
-of the input(s). It is useful for real-time output (e.g. live streaming).
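-
-For example, a sketch that sends a file in real time to an RTMP server (the
-URL and file name are hypothetical):
-
ffmpeg -re -i input.mp4 -c copy -f flv rtmp://example.com/live/stream
-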
-
--loop_input
-Loop over the input stream. Currently it works only for image
-streams. This option is used for automatic FFserver testing.
-This option is deprecated, use -loop 1.
-
--loop_output number_of_times
-Repeatedly loop output for formats that support looping such as animated GIF
-(0 will loop the output infinitely).
-This option is deprecated, use -loop.
-
--vsync parameter
-Video sync method.
-For compatibility reasons old values can be specified as numbers.
-Newly added values will have to be specified as strings always.
-
-
-0, passthrough
-Each frame is passed with its timestamp from the demuxer to the muxer.
-
-1, cfr
-Frames will be duplicated and dropped to achieve exactly the requested
-constant frame rate.
-
-2, vfr
-Frames are passed through with their timestamp or dropped so as to
-prevent 2 frames from having the same timestamp.
-
-drop
-As passthrough but destroys all timestamps, making the muxer generate
-fresh timestamps based on frame-rate.
-
--1, auto
-Chooses between 1 and 2 depending on muxer capabilities. This is the
-default method.
-
-
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-With -map you can select from which stream the timestamps should be
-taken. You can leave either video or audio unchanged and sync the
-remaining stream(s) to the unchanged one.
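-
-For example, a sketch producing a variable frame rate output, dropping only
-frames that would share a timestamp (the file names are hypothetical):
-
ffmpeg -i input.mp4 -vsync vfr output.mp4
-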
-
-
--async samples_per_second
-Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
-the parameter is the maximum samples per second by which the audio is changed.
--async 1 is a special case where only the start of the audio stream is corrected
-without any later correction.
-
-Note that the timestamps may be further modified by the muxer, after this.
-For example, in the case that the format option avoid_negative_ts
-is enabled.
-
-This option has been deprecated. Use the aresample
audio filter instead.
-
-
--copyts
-Do not process input timestamps, but keep their values without trying
-to sanitize them. In particular, do not remove the initial start time
-offset value.
-
-Note that, depending on the vsync option or on specific muxer
-processing (e.g. in case the format option avoid_negative_ts
-is enabled) the output timestamps may mismatch with the input
-timestamps even when this option is selected.
-
-
--start_at_zero
-When used with copyts , shift input timestamps so they start at zero.
-
-This means that using e.g. -ss 50
will make output timestamps start at
-50 seconds, regardless of what timestamp the input file started at.
-
-
--copytb mode
-Specify how to set the encoder timebase when stream copying. mode is an
-integer numeric value, and can assume one of the following values:
-
-
-1
-Use the demuxer timebase.
-
-The time base is copied to the output encoder from the corresponding input
-demuxer. This is sometimes required to avoid non monotonically increasing
-timestamps when copying video streams with variable frame rate.
-
-
-0
-Use the decoder timebase.
-
-The time base is copied to the output encoder from the corresponding input
-decoder.
-
-
--1
-Try to make the choice automatically, in order to generate a sane output.
-
-
-
-Default value is -1.
-
-
--shortest (output )
-Finish encoding when the shortest input stream ends.
-
--dts_delta_threshold
-Timestamp discontinuity delta threshold.
-
--muxdelay seconds (input )
-Set the maximum demux-decode delay.
-
--muxpreload seconds (input )
-Set the initial demux-decode delay.
-
--streamid output-stream-index :new-value (output )
-Assign a new stream-id value to an output stream. This option should be
-specified prior to the output filename to which it applies.
-For the situation where multiple output files exist, a streamid
-may be reassigned to a different value.
-
-For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
-an output mpegts file:
-
-
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
-
-
-
--bsf[:stream_specifier ] bitstream_filters (output,per-stream )
-Set bitstream filters for matching streams. bitstream_filters is
-a comma-separated list of bitstream filters. Use the -bsfs
option
-to get the list of bitstream filters.
-
-
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
-
-
-
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
-
-
-
--tag[:stream_specifier ] codec_tag (input/output,per-stream )
-Force a tag/fourcc for matching streams.
-
-
--timecode hh :mm :ss SEPff
-Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’
-(or ’.’) for drop.
-
-
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
-
-
-
--filter_complex filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. For simple graphs – those with one input and one output of the same
-type – see the -filter options. filtergraph is a description of
-the filtergraph, as described in the “Filtergraph syntax” section of the
-ffmpeg-filters manual.
-
-Input link labels must refer to input streams using the
-[file_index:stream_specifier]
syntax (i.e. the same as -map
-uses). If stream_specifier matches multiple streams, the first one will be
-used. An unlabeled input will be connected to the first unused input stream of
-the matching type.
-
-Output link labels are referred to with -map . Unlabeled outputs are
-added to the first output file.
-
-Note that with this option it is possible to use only lavfi sources without
-normal input files.
-
-For example, to overlay an image over video
-
-
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
-'[out]' out.mkv
-
-Here [0:v]
refers to the first video stream in the first input file,
-which is linked to the first (main) input of the overlay filter. Similarly the
-first video stream in the second input is linked to the second (overlay) input
-of overlay.
-
-Assuming there is only one video stream in each input file, we can omit input
-labels, so the above is equivalent to
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
-'[out]' out.mkv
-
-
-Furthermore we can omit the output label and the single output from the filter
-graph will be added to the output file automatically, so we can simply write
-
-
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
-
-
-To generate 5 seconds of pure red video using lavfi color
source:
-
-
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
-
-
-
--lavfi filtergraph (global )
-Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
-outputs. Equivalent to -filter_complex .
-
-
--filter_complex_script filename (global )
-This option is similar to -filter_complex , the only difference is that
-its argument is the name of the file from which a complex filtergraph
-description is to be read.
-
-
--accurate_seek (input )
-This option enables or disables accurate seeking in input files with the
--ss option. It is enabled by default, so seeking is accurate when
-transcoding. Use -noaccurate_seek to disable it, which may be useful
-e.g. when copying some streams and transcoding the others.
-
-
--override_ffserver (global )
-Overrides the input specifications from ffserver
. Using this
-option you can map any input stream to ffserver
and control
-many aspects of the encoding from ffmpeg
. Without this
-option ffmpeg
will transmit to ffserver
what is
-requested by ffserver
.
-
-The option is intended for cases where features are needed that cannot be
-specified to ffserver
but can be to ffmpeg
.
-
-
--sdp_file file (global )
-Print sdp information to file .
-This allows dumping sdp information when at least one output isn’t an
-rtp stream.
-
-
--discard (input )
-Allows discarding specific streams or frames of streams at the demuxer.
-Not all demuxers support this.
-
-
-none
-Discard no frame.
-
-
-default
-Default, which discards no frames.
-
-
-noref
-Discard all non-reference frames.
-
-
-bidir
-Discard all bidirectional frames.
-
-
-nokey
-Discard all frames except keyframes.
-
-
-all
-Discard all frames.
-
-
-
-
-
-
-
As a special exception, you can use a bitmap subtitle stream as input: it
-will be converted into a video with the same size as the largest video in
-the file, or 720x576 if no video is present. Note that this is an
-experimental and temporary solution. It will be removed once libavfilter has
-proper support for subtitles.
-
-
For example, to hardcode subtitles on top of a DVB-T recording stored in
-MPEG-TS format, delaying the subtitles by 1 second:
-
-
ffmpeg -i input.ts -filter_complex \
- '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
- -sn -map '#0x2dc' output.mkv
-
-
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
-audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
-
-
-
5.12 Preset files# TOC
-
A preset file contains a sequence of option =value pairs,
-one for each line, specifying a sequence of options which would be
-awkward to specify on the command line. Lines starting with the hash
-(’#’) character are ignored and are used to provide comments. Check
-the presets directory in the FFmpeg source tree for examples.
-
-
There are two types of preset files: ffpreset and avpreset files.
-
-
-
5.12.1 ffpreset files# TOC
-
ffpreset files are specified with the vpre
, apre
,
-spre
, and fpre
options. The fpre
option takes the
-filename of the preset instead of a preset name as input and can be
-used for any kind of codec. For the vpre
, apre
, and
-spre
options, the options specified in a preset file are
-applied to the currently selected codec of the same type as the preset
-option.
-
-
The argument passed to the vpre
, apre
, and spre
-preset options identifies the preset file to use according to the
-following rules:
-
-
First ffmpeg searches for a file named arg .ffpreset in the
-directories $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg , and in
-the datadir defined at configuration time (usually PREFIX/share/ffmpeg )
-or in a ffpresets folder along the executable on win32,
-in that order. For example, if the argument is libvpx-1080p
, it will
-search for the file libvpx-1080p.ffpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-codec_name -arg .ffpreset in the above-mentioned
-directories, where codec_name is the name of the codec to which
-the preset file options will be applied. For example, if you select
-the video codec with -vcodec libvpx
and use -vpre 1080p
,
-then it will search for the file libvpx-1080p.ffpreset .
-
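Assuming such a file exists in one of the directories listed above (the file and stream names here are only examples), it could then be applied in either of these ways:

# apply the preset by name, or point -fpre at the file directly
ffmpeg -i input.mkv -vcodec libvpx -vpre 1080p output.webm
ffmpeg -i input.mkv -vcodec libvpx -fpre ~/.ffmpeg/libvpx-1080p.ffpreset output.webm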
-
-
5.12.2 avpreset files# TOC
-
avpreset files are specified with the pre
option. They work similarly to
-ffpreset files, but they only allow encoder-specific options. Therefore, an
-option =value pair specifying an encoder cannot be used.
-
-
When the pre
option is specified, ffmpeg will look for files with the
-suffix .avpreset in the directories $AVCONV_DATADIR (if set), and
-$HOME/.avconv , and in the datadir defined at configuration time (usually
-PREFIX/share/ffmpeg ), in that order.
-
-
First ffmpeg searches for a file named codec_name -arg .avpreset in
-the above-mentioned directories, where codec_name is the name of the codec
-to which the preset file options will be applied. For example, if you select the
-video codec with -vcodec libvpx
and use -pre 1080p
, then it will
-search for the file libvpx-1080p.avpreset .
-
-
If no such file is found, then ffmpeg will search for a file named
-arg .avpreset in the same directories.
-
-
-
-
-
-
- For streaming at very low bitrates, use a low frame rate
-and a small GOP size. This is especially true for RealVideo where
-the Linux player does not seem to be very fast, so it can miss
-frames. An example is:
-
-
-
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
-
-
- The parameter ’q’ which is displayed while encoding is the current
-quantizer. The value 1 indicates that a very good quality could
-be achieved. The value 31 indicates the worst quality. If q=31 appears
-too often, it means that the encoder cannot compress enough to meet
-your bitrate. You must either increase the bitrate, decrease the
-frame rate or decrease the frame size.
-
- If your computer is not fast enough, you can speed up the
-compression at the expense of the compression ratio. You can use
-’-me zero’ to speed up motion estimation, and ’-g 0’ to disable
-motion estimation completely (you have only I-frames, which means it
-is about as good as JPEG compression).
-
- To have very low audio bitrates, reduce the sampling frequency
-(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
-
- To have a constant quality (but a variable bitrate), use the option
-’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst
-quality).
-
-
-
-
-
7 Examples# TOC
-
-
-
7.1 Video and Audio grabbing# TOC
-
-
If you specify the input format and device then ffmpeg can grab video
-and audio directly.
-
-
-
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Or with an ALSA audio source (mono input, card id 1) instead of OSS:
-
-
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
-
-
-
Note that you must activate the right video source and channel before
-launching ffmpeg with any TV viewer such as
-xawtv by Gerd Knorr. You also
-have to set the audio recording levels correctly with a
-standard mixer.
-
-
-
7.2 X11 grabbing# TOC
-
-
Grab the X11 display with ffmpeg via
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as
-the DISPLAY environment variable.
-
-
-
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
-
-
-
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
-variable. 10 is the x-offset and 20 the y-offset for the grabbing.
-
-
-
7.3 Video and Audio file format conversion# TOC
-
-
Any supported file format and protocol can serve as input to ffmpeg:
-
-
Examples:
-
- You can use YUV files as input:
-
-
-
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
-
-
-It will use the files:
-
-
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
-/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
-
-
-The Y files use twice the resolution of the U and V files. They are
-raw files, without header. They can be generated by all decent video
-decoders. You must specify the size of the image with the -s option
-if ffmpeg cannot guess it.
-
- You can input from a raw YUV420P file:
-
-
-
ffmpeg -i /tmp/test.yuv /tmp/out.avi
-
-
-test.yuv is a file containing raw YUV planar data. Each frame is composed
-of the Y plane followed by the U and V planes at half vertical and
-horizontal resolution.
-
- You can output to a raw YUV420P file:
-
-
-
ffmpeg -i mydivx.avi hugefile.yuv
-
-
- You can set several input files and output files:
-
-
-
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
-
-
-Converts the audio file a.wav and the raw YUV video file a.yuv
-to MPEG file a.mpg.
-
- You can also do audio and video conversions at the same time:
-
-
-
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
-
-
-Converts a.wav to MPEG audio at 22050 Hz sample rate.
-
- You can encode to several formats at the same time and define a
-mapping from input stream to output streams:
-
-
-
ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
-
-
-Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map
-file:index’ specifies which input stream is used for each output
-stream, in the order of the definition of output streams.
-
- You can transcode decrypted VOBs:
-
-
-
ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
-
-
-This is a typical DVD ripping example; the input is a VOB file, the
-output an AVI file with MPEG-4 video and MP3 audio. Note that in this
-command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
-GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
-input video. Furthermore, the audio stream is MP3-encoded so you need
-to enable LAME support by passing --enable-libmp3lame
to configure.
-The mapping is particularly useful for DVD transcoding
-to get the desired audio language.
-
-NOTE: To see the supported input formats, use ffmpeg -formats
.
-
- You can extract images from a video, or create a video from many images:
-
-For extracting images from a video:
-
-
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
-
-
-This will extract one video frame per second from the video and will
-output them in files named foo-001.jpeg , foo-002.jpeg ,
-etc. Images will be rescaled to fit the new WxH values.
-
-If you want to extract just a limited number of frames, you can use the
-above command in combination with the -vframes or -t option, or in
-combination with -ss to start extracting from a certain point in time.
-
-For creating a video from many images:
-
-
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
-
-
-The syntax foo-%03d.jpeg
specifies to use a decimal number
-composed of three digits padded with zeroes to express the sequence
-number. It is the same syntax supported by the C printf function, but
-only formats accepting a normal integer are suitable.
-
-When importing an image sequence, -i also supports expanding
-shell-like wildcard patterns (globbing) internally, by selecting the
-image2-specific -pattern_type glob
option.
-
-For example, for creating a video from filenames matching the glob pattern
-foo-*.jpeg
:
-
-
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
-
-
- You can put many streams of the same type in the output:
-
-
-
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
-
-
-The resulting output file test12.nut will contain the first four streams
-from the input files in reverse order.
-
- To force CBR video output:
-
-
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
-
-
- The four options lmin, lmax, mblmin and mblmax use ’lambda’ units,
-but you may use the QP2LAMBDA constant to easily convert from ’q’ units:
-
-
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
-
-
-
-
-
-
-
8 See Also# TOC
-
-
ffmpeg-all ,
-ffplay , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
9 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffplay-all.html b/Externals/ffmpeg/shared/doc/ffplay-all.html
deleted file mode 100644
index 1264b50ebd..0000000000
--- a/Externals/ffmpeg/shared/doc/ffplay-all.html
+++ /dev/null
@@ -1,21308 +0,0 @@
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffplay [options ] [input_file ]
-
-
-
2 Description# TOC
-
-
FFplay is a very simple and portable media player using the FFmpeg
-libraries and the SDL library. It is mostly used as a testbed for the
-various FFmpeg APIs.
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
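For illustration (the file names are hypothetical), stream specifiers can be passed to the stream selection options described later in this document:

# play the second audio stream / the first subtitle stream
ffplay -ast a:1 input.mkv
ffplay -sst s:0 movie.mkv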
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr. If coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
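For example (file names are illustrative), to set the bitrate AVOption only on the first video and the first audio stream:

ffmpeg -i input.mkv -b:v:0 2M -b:a:0 128k output.mkv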
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--x width
-Force displayed width.
-
--y height
-Force displayed height.
-
--s size
-Set frame size (WxH or abbreviation), needed for videos which do
-not contain a header with the frame size like raw YUV. This option
-has been deprecated in favor of private options, try -video_size.
-
--fs
-Start in fullscreen mode.
-
--an
-Disable audio.
-
--vn
-Disable video.
-
--sn
-Disable subtitles.
-
--ss pos
-Seek to a given position in seconds.
-
--t duration
-Play duration seconds of audio/video.
-
--bytes
-Seek by bytes.
-
--nodisp
-Disable graphical display.
-
--f fmt
-Force format.
-
--window_title title
-Set window title (default is the input filename).
-
--loop number
-Loops movie playback <number> times. 0 means forever.
-
--showmode mode
-Set the show mode to use.
-Available values for mode are:
-
-‘0, video ’
-show video
-
-‘1, waves ’
-show audio waves
-
-‘2, rdft ’
-show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
-
-
-
-Default value is "video", if video is not present or cannot be played
-"rdft" is automatically selected.
-
-You can interactively cycle through the available show modes by
-pressing the key w .
-
-
--vf filtergraph
-Create the filtergraph specified by filtergraph and use it to
-filter the video stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single video input and a single video
-output. In the filtergraph, the input is associated to the label
-in
, and the output to the label out
. See the
-ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-You can specify this parameter multiple times and cycle through the specified
-filtergraphs along with the show modes by pressing the key w .
-
-
--af filtergraph
-filtergraph is a description of the filtergraph to apply to
-the input audio.
-Use the option "-filters" to show all the available filters (including
-sources and sinks).
-
-
--i input_file
-Read input_file .
-
-
-
-
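Putting a few of these options together (the file name is hypothetical):

# full screen, start 30 seconds in, play one minute, rotate the picture
ffplay -fs -ss 30 -t 60 -window_title "Preview" -vf "transpose=1" input.mkv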
-
3.5 Advanced options# TOC
-
--pix_fmt format
-Set pixel format.
-This option has been deprecated in favor of private options, try -pixel_format.
-
-
--stats
-Print several playback statistics, in particular show the stream
-duration, the codec parameters, the current position in the stream and
-the audio/video synchronisation drift. It is on by default, to
-explicitly disable it you need to specify -nostats
.
-
-
--fast
-Non-spec-compliant optimizations.
-
--genpts
-Generate pts.
-
--sync type
-Set the master clock to audio (type=audio
), video
-(type=video
) or external (type=ext
). Default is audio. The
-master clock is used to control audio-video synchronization. Most media
-players use audio as master clock, but in some cases (streaming or high
-quality broadcast) it is necessary to change that. This option is mainly
-used for debugging purposes.
-
--ast audio_stream_specifier
-Select the desired audio stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" audio stream is selected in the program of the
-already selected video stream.
-
--vst video_stream_specifier
-Select the desired video stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" video stream is selected.
-
--sst subtitle_stream_specifier
-Select the desired subtitle stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" subtitle stream is selected in the program of the
-already selected video or audio stream.
-
--autoexit
-Exit when video is done playing.
-
--exitonkeydown
-Exit if any key is pressed.
-
--exitonmousedown
-Exit if any mouse button is pressed.
-
-
--codec:media_specifier codec_name
-Force a specific decoder implementation for the stream identified by
-media_specifier , which can assume the values a
(audio),
-v
(video), and s
(subtitle).
-
-
--acodec codec_name
-Force a specific audio decoder.
-
-
--vcodec codec_name
-Force a specific video decoder.
-
-
--scodec codec_name
-Force a specific subtitle decoder.
-
-
--autorotate
-Automatically rotate the video according to presentation metadata. Enabled by
-default, use -noautorotate to disable it.
-
-
--framedrop
-Drop video frames if video is out of sync. Enabled by default if the master
-clock is not set to video. Use this option to enable frame dropping for all
-master clock sources, use -noframedrop to disable it.
-
-
--infbuf
-Do not limit the input buffer size, read as much data as possible from the
-input as soon as possible. Enabled by default for realtime streams, where data
-may be dropped if not read in time. Use this option to enable infinite buffers
-for all inputs, use -noinfbuf to disable it.
-
-
-
-
-
-
3.6 While playing# TOC
-
-
-q, ESC
-Quit.
-
-
-f
-Toggle full screen.
-
-
-p, SPC
-Pause.
-
-
-a
-Cycle audio channel in the current program.
-
-
-v
-Cycle video channel.
-
-
-t
-Cycle subtitle channel in the current program.
-
-
-c
-Cycle program.
-
-
-w
-Cycle video filters or show modes.
-
-
-s
-Step to the next frame.
-
-Pause if the stream is not already paused, step to the next video
-frame, and pause.
-
-
-left/right
-Seek backward/forward 10 seconds.
-
-
-down/up
-Seek backward/forward 1 minute.
-
-
-page down/page up
-Seek to the previous/next chapter.
-or if there are no chapters
-Seek backward/forward 10 minutes.
-
-
-mouse click
-Seek to percentage in file corresponding to fraction of width.
-
-
-
-
-
-
-
4 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
4.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- '
and \
are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between ” are included literally in the
-parsed string. The quote character '
itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
4.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour
containing the '
special
-character:
-
Crime d\'Amour
-
- The string above contains a quote, so the '
needs to be escaped
-when quoting it:
-
'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
4.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
4.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
[-][HH:]MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
[-]S+[.m...]
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
4.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
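Such values can be passed to any option expecting a duration, for instance (file name hypothetical):

ffplay -ss 01:30 -t 23.189 recording.mkv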
-
-
-
4.4 Video size# TOC
-
Specify the size of the sourced video; it may be a string of the form
-width x height, or the name of a size abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
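For example (the file name is hypothetical), raw video has no header, so the size must be given explicitly, and an abbreviation works as well as an explicit WxH value:

ffplay -f rawvideo -pixel_format yuv420p -video_size hd720 -framerate 25 raw.yuv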
-
-
4.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
-
-
-
4.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
-
-
4.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed by "0x" followed by an
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
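A brief sketch of the syntax (the size value and inputs are illustrative): the following two commands request the same half-opaque red from the lavfi color source, once by name and once as an RGB sequence; the alpha only becomes visible when the color is composited, e.g. in an overlay:

ffplay -f lavfi -i color=c=red@0.5:size=320x240
ffplay -f lavfi -i color=c=0xFF0000@0.5:size=320x240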
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
-
-
4.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1(back) ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout
)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_*
macros in libavutil/channel_layout.h .
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
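As an illustration (the input name is hypothetical), the following two commands request the same 5.1 layout from the aformat filter, once by its standard name and once as a sequence of individual channels:

ffplay -af "aformat=channel_layouts=5.1" input.mka
ffplay -af "aformat=channel_layouts=FL+FR+FC+LFE+BL+BR" input.mka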
-
-
5 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +
, -
,
-*
, /
, ^
.
-
-
The following unary operators are available: +
, -
.
-
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and less than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is less than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is less than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated y , it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable where to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note: variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
For example the construct:
-
if (A AND B) then C
-
is equivalent to:
-
if(A*B, C)
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
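As a small sketch of the evaluator in use (the file name is hypothetical), the volume filter accepts such an expression, so halving the volume after the first 60 seconds can be written with if() and gt():

ffplay -af "volume='if(gt(t,60),0.5,1.0)':eval=frame" input.mka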
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^40
-
-E
-10^18 / 2^50
-
-Z
-10^21 / 2^60
-
-Y
-10^24 / 2^70
-
-
-
-
-
-
6 OpenCL Options# TOC
-
-
When FFmpeg is configured with --enable-opencl
, it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-
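A possible invocation, assuming FFmpeg was built with --enable-opencl and that a filter with OpenCL support (such as unsharp) is used; the indexes and file names are only illustrative and should be taken from the -opencl_bench output:

ffmpeg -opencl_bench
ffmpeg -opencl_options platform_idx=0:device_idx=1 -i input.mp4 -vf unsharp=opencl=1 output.mp4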
-
-
-
7 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext
options or using the libavutil/opt.h API
-for programmatic use.
-
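A sketch of setting a few of these generic options from the command line (the values and file names are illustrative only):

ffmpeg -i input.mkv -b:v 1500k -g 50 -bf 2 -qmin 3 -qmax 40 output.mp4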
-
The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vector by macroblock (mpeg4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate
and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be between -1 and
-69, default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be between -1 and
-1024, default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not automatically detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Work around various bugs in broken Microsoft decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, stricter version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk, do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non-compliances as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use elsewise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
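-For example, a minimal sketch (input.mp4 is a hypothetical file) that decodes
-the input and logs motion vector and per-block QP debug information; raising
-the log level may be needed to see the messages:
-
-
ffmpeg -loglevel debug -debug mv+qp -i input.mp4 -f null -
-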
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set the limit of the motion vectors range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non spec compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
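-For example, a sketch (input.mp4 is a hypothetical file) that exports motion
-vectors and draws them with the codecview filter:
-
-
ffplay -flags2 +export_mvs input.mp4 -vf codecview=mv=pf+bf+bb
-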
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identical to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames except keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
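-For example, a minimal sketch (input.mp4 is a hypothetical file) that decodes
-only keyframes and discards the result, which can be useful for quickly
-scanning a file:
-
-
ffmpeg -skip_frame nokey -i input.mp4 -f null -
-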
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non drop frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
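-For example, a minimal sketch (input.mkv is a hypothetical file) that decodes
-with four threads restricted to slice threading, avoiding the extra frame of
-delay introduced by frame threading:
-
-
ffmpeg -threads 4 -thread_type slice -i input.mkv -f null -
-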
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none
.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
8 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders
and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER
/
---disable-decoder=DECODER
.
-
-
The option -decoders
of the ff* tools will display the list of
-enabled decoders.
-
-
-
-
9 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
9.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
9.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
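-For example, a sketch (interlaced.raw is a hypothetical file) that tells the
-decoder to assume top-field-first input; the pixel format, size and frame rate
-must be given explicitly because raw video carries no header:
-
-
ffmpeg -f rawvideo -pixel_format yuv420p -video_size 720x576 -framerate 25 -top 1 -i interlaced.raw -c:v ffv1 out.mkv
-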
-
-
-
-
-
10 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
10.1 ac3# TOC
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
10.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
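-For example, a minimal sketch (input.ac3 is a hypothetical file) that decodes
-with dynamic range compression disabled, producing full range audio:
-
-
ffmpeg -drc_scale 0 -i input.ac3 output.wav
-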
-
-
-
-
-
10.2 ffwavesynth# TOC
-
-
Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
10.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt
.
-
-
-
10.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm
.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
10.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc
.
-
-
-
10.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
10.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb
.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
10.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb
.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
10.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
11 Subtitles Decoders# TOC
-
-
-
11.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
11.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0
.
-
-
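-For example, a sketch (movie.mkv is a hypothetical file containing DVD
-subtitles) that decodes only the forced subtitle entries and burns them into
-the video with the overlay filter:
-
-
ffmpeg -forced_subs_only 1 -i movie.mkv -filter_complex "[0:v][0:s:0]overlay" -map 0:a -c:a copy burned.mkv
-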
-
-
-
11.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi
.
-
-
-
11.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text, you should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext based subtitles if
-your application can handle simple text based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext based subtitles where empty spaces may
-be present at the start or at the end of the lines or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
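-For example, a sketch (input.ts is a hypothetical recording, 888 a hypothetical
-subtitle page) that decodes a teletext page as plain text and writes it to an
-SRT file:
-
-
ffmpeg -txt_format text -txt_page 888 -i input.ts -map 0:s:0 -c:s srt subtitles.srt
-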
-
-
-
12 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs
.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs
, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF
, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF
.
-
-
The option -bsfs
of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
12.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
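-For example, to remux an ADTS AAC stream from an MPEG-2 transport stream into
-an MP4 file without re-encoding (input.ts is a hypothetical file):
-
-
ffmpeg -i input.ts -c copy -bsf:a aac_adtstoasc output.mp4
-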
-
-
12.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
12.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified it is assumed ‘k ’.
-
-
For example the following ffmpeg
command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264
encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
12.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg
, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
12.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
12.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
12.7 mjpega_dump_header# TOC
-
-
-
12.8 movsub# TOC
-
-
-
12.9 mp3_header_decompress# TOC
-
-
-
12.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeric string whose value is related to how often output bytes will
-be modified. Values less than or equal to 0 are forbidden; the lower the
-value, the more frequently bytes will be modified, with 1 meaning
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
12.11 remove_extra# TOC
-
-
-
13 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value allows more information to be
-detected in case it is dispersed into the stream, but will increase
-latency. Must be an integer not less than 32. It is 5000000 by default.
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value allows more accurate information to be detected, but will
-increase latency. It defaults to 5,000,000 microseconds = 5 seconds
-(see the example after this list).
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non compliancies as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitle desynchronization and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set the number of bytes to skip before reading the header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
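-Several of the options above are commonly combined when probing problematic
-inputs. A minimal sketch (input.ts is a hypothetical file) that enlarges the
-probing window and generates missing PTS values while stream-copying:
-
-
ffmpeg -probesize 50000000 -analyzeduration 10000000 -fflags +genpts -i input.ts -c copy output.mkv
-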
-
-
-
-
13.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
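-For example, a minimal sketch that uses stream specifiers with the -map option
-to copy the first video stream and the second audio stream (INPUT and OUTPUT
-are placeholders):
-
-
ffmpeg -i INPUT -map 0:v:0 -map 0:a:1 -c copy OUTPUT
-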
-
-
14 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
14.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
-
14.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers except the PNG signature, up to (but not including) the first
-fcTL chunk, are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
-
-
14.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
14.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
14.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was at its default -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the stream with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
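-A minimal sketch of a concat script (the file names are hypothetical):
-
-
ffconcat version 1.0
file intro.mkv
duration 10.0
file main.mkv
-
-
-It can then be demuxed and stream-copied into a single file with:
-
-
ffmpeg -f concat -i list.ffconcat -c copy joined.mkv
-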
-
-
14.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification and is relative and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1, it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
-
-
14.5 flv# TOC
-
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
14.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-tracks meta data entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
-
-
14.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
14.8 gif# TOC
-
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
14.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%0Nd" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not provided: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
14.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
14.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
-
-
-
14.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
14.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
14.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
15 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
A ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
16 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "–list-protocols".
-
-
You can disable all the protocols using the configure option
-"–disable-protocols", and selectively enable a protocol using the
-option "–enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"–disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
16.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
ffplay bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
ffplay -playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
16.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to temporary file. It brings seeking capability to live streams.
-
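-For example, a sketch (the URL is hypothetical) that wraps an HTTP stream so
-that ffplay can seek within the already-downloaded part:
-
-
ffplay cache:http://example.com/video.mp4
-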
-
-
-
-
16.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
-Allows reading and seeking from many resources in sequence as if they were
-a unique resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
-where URL1 , URL2 , ..., URLN are the URLs of the
-resources to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
16.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
-
-
16.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
16.6 file# TOC
-
-
File access protocol.
-
-
-Allows reading from or writing to a file.
-
-
A file URL can have the form:
-
file:filename
-
where filename is the path of the file to read.
-
-
-A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow medium.
-
-
-
-
-
16.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
-Allows reading from or writing to remote resources using the FTP protocol.
-
-
-The following syntax is required:
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail
-address should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
-NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
-
-
16.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
16.9 hls# TOC
-
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
16.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
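-
-For example, reading over HTTP with a custom User-Agent and seeking
-disabled might look like this (the URL is a placeholder):
-
-ffplay -user_agent "MyPlayer/1.0" -seekable 0 http://server/path/stream.ts
-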
-
-
16.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
16.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, which do not support
-the HTTP PUT method but only the SOURCE method.
-
-
-
-
-
-
icecast://[username[:password]@]server:port/mountpoint
-
-
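-
-As a sketch, streaming an MP3 file to an Icecast mountpoint might look
-like this (user, password, server, port and mountpoint are placeholders
-and depend on the server configuration):
-
-ffmpeg -re -i input.mp3 -c:a libmp3lame -f mp3 icecast://source:password@server:8000/stream
-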
-
-
16.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
16.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server[:port][/app][/playpath]
-
-
-
-
16.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
16.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
-
pipe:[number]
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example to read from stdin with ffmpeg
:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg
:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV), require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
16.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username:password@]server[:port][/app][/instance][/playpath]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live and recorded.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example, to read with ffplay a multimedia resource named
-"sample" from the application "vod" on an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
16.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
16.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
16.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
16.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
16.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
16.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
The following syntax is required:
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
-
For more information see: http://www.samba.org/ .
-
-
-
16.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh.
-
-
Allows reading from or writing to remote resources using the SFTP protocol.
-
-
The following syntax is required:
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing the private key to use during
-authorization. By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
16.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"--enable-librtmp". If enabled, this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto://server[:port][/app][/playpath] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using
-ffmpeg
:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay
:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
16.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname[:port][?option=val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n '
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
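-
-For example, to send a stream over RTP with a reduced packet size
-(hostname and port are placeholders):
-
-ffmpeg -re -i input -f rtp rtp://hostname:port?pkt_size=1200
-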
-
-
16.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat; it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for an RTSP URL is:
-
-
rtsp://hostname[:port]/path
-
-
-
Options can be set on the ffmpeg/ffplay command
-line, or set in code via AVOptions or in
-avformat_open_input.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay, the
-streams to display can be chosen with -vst n and
--ast n for video and audio respectively, and can be switched
-on the fly by pressing v and a.
-
-
-
16.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay and
-ffmpeg tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
16.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat; it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
16.27.1 Muxer# TOC
-
-
The syntax for a SAP URL given to the muxer is:
-
-
sap://destination[:port][?options]
-
-
-
The RTP packets are sent to destination on port port,
-or to port 5004 if no port is specified.
-options is a &-separated list. The following options
-are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay
:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay
, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
16.27.2 Demuxer# TOC
-
-
The syntax for a SAP URL given to the demuxer is:
-
-
sap://[address][:port]
-
-
-
address is the multicast address to listen for announcements on,
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port that is listened on, 9875 if omitted.
-
-
The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
ffplay sap://
-
-
To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
16.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host:port[?options]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. An outgoing connection is made by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
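-
-As a sketch, sending an MPEG-TS stream to a listening SCTP peer could
-look as follows (host and port are placeholders):
-
-ffmpeg -re -i input -f mpegts sctp://host:port
-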
-
-
16.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
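-
-As a rough sketch (not an authoritative recipe), sending an MPEG-TS
-stream over SRTP could look as follows; host and port are placeholders
-and the 40-character base64 string is only a dummy master key and salt:
-
-ffmpeg -re -i input -f rtp_mpegts -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa srtp://host:port
-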
-
-
-
16.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
16.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP URL is:
-
-
tcp://hostname:port[?options]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to set up a listening TCP connection
-with ffmpeg, which is then accessed with ffplay:
-
-
ffmpeg -i input -f format tcp://hostname:port?listen
-ffplay tcp://hostname:port
-
-
-
-
16.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL URL is:
-
-
tls://hostname:port[?options]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay
:
-
-
-
ffplay tls://hostname:port
-
-
-
-
16.33 udp# TOC
-
-
User Datagram Protocol.
-
-
The required syntax for a UDP URL is:
-
-
udp://hostname:port[?options]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
16.33.1 Examples# TOC
-
-
- Use ffmpeg
to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname:port
-
-
- Use ffmpeg
to stream in mpegts format over UDP using 188
-sized UDP packets, using a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname:port?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg
to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address]:port ...
-
-
-
-
-
16.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
-
unix://filepath
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
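-
-For example, writing an MPEG-TS stream to a Unix socket that another
-process is already listening on might look like this (the socket path
-is a placeholder):
-
-ffmpeg -i input -f mpegts unix:///tmp/ffmpeg.sock
-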
-
-
-
17 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same as those provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
-
-
-
18 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow access to
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-
You can disable all the input devices using the configure option
-"--disable-indevs", and selectively enable an input device using the
-option "--enable-indev=INDEV", or you can disable a particular
-input device using the option "--disable-indev=INDEV".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
-
A description of the currently available input devices follows.
-
-
-
18.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD [,DEV [,SUBDEV ]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg
from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
18.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the framework currently recommended by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OSX version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the latter selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using
--video_device_index <INDEX> and/or -audio_device_index <INDEX>,
-overriding any device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
18.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
18.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
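-
-For example, a typical session might first list the available drivers
-and then record from driver 0 (out.avi is a placeholder name):
-
-ffmpeg -f vfwcap -i list
-ffmpeg -f vfwcap -framerate 25 -i 0 out.avi
-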
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
18.3 bktr# TOC
-
-
BSD video input device.
-
-
-
18.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronization between them.
-
-
The input name should be in the format:
-
-
TYPE=NAME[:TYPE=NAME]
-
-
where TYPE can be either audio or video,
-and NAME is the device’s name.
-
-
-
18.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
18.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
-
-
-
-
18.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
18.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
18.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
-
desktop
-
-or
-
-title=window_title
-
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
18.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0 to
-not draw the pointer. Default value is 1.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc,
-corresponding to a frame rate of 30000/1001.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
18.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
18.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
18.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
-
-
-
-
-
-
18.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
18.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
18.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
18.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
18.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
18.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
18.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
18.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
18.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string '' as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
18.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg
use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
18.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default"
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
18.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the sample rate in Hz; by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
18.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
-
-
-
18.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
18.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg
use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
18.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node. Linux
-systems usually create such nodes automatically when the device
-(e.g. a USB webcam) is plugged into the system, and the node has a name of
-the kind /dev/videoN, where N is a number associated with
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-width xheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
18.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Default to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
18.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
-
18.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname :display_number .screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo program for getting basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
18.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0 specifies
-not to draw the pointer. Default value is 1.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered or a number of pixels PIXELS.
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of the region; otherwise, the region
-follows only when the mouse pointer comes within PIXELS (greater than
-zero) of the edge of the region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer reaches within 100 pixels to edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc,
-corresponding to a frame rate of 30000/1001.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga
.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1
.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
18.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz and the number
-of channels currently is limited to 2 (stereo).
-
-
-
18.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
18.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
19 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
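-As a sketch of the filter form of these options (file names are placeholders),
-a conversion to 48 kHz with triangular dithering could be requested with:
-
-ffmpeg -i in.wav -af aresample=out_sample_rate=48000:dither_method=triangular out.wav
-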
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to prevent clipping vs. preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-force resampling, this flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
-
-
-
20 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
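-For example (an illustrative command, not part of the original manual text),
-the scaling algorithm can be selected on the command line with:
-
-ffmpeg -i input.avi -vf scale=640:360 -sws_flags lanczos+accurate_rnd output.mp4
-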
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific of
-some scaling algorithms and ignored by others. The specified values
-are floating point number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
-
-
-
-
21 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half part of the video, and then vertically flipped. The
-overlay filter takes in input the first unchanged output of the
-split filter (which was labelled as [main] ), and overlay on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take in input a list of parameters: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
22 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
-
-graph2dot -h
to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
23 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
23.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance of, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments ".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value . In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade
filter
-declares three options in this order – type , start_frame and
-nb_frames . Then the parameter list in:0:30 means that the value
-in is assigned to the option type , 0 to
-start_frame and 30 to nb_frames .
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct value must precede the key=value pairs, and
-follow the same constraints order of the previous point. The following
-key=value pairs can be set in any preferred order.
-
-
-
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
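-For instance, a format filter instance accepting several pixel formats (an
-illustrative value list) could be written as:
-
-format=pix_fmts=yuv420p|yuv444p|rgb24
-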
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
23.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to
-separate values, or one of the escaping characters \'
.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \'
or the special
-characters [],;
used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the '
special escaping character, and the
-:
special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \'
escaping special characters,
-also ,
needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\
is special and needs to be escaped with another \
, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
24 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
25 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
25.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channel are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If number of given delays is
-smaller than number of channels all remaining channels will not be delayed.
-
-
-
-
-
25.1.1 Examples# TOC
-
-
- Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
-adelay=1500|0|500
-
-
-
25.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
25.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
-aecho=0.8:0.88:60:0.4
-
- If delay is very short, then it sounds like a (metallic) robot playing music:
-
-aecho=0.8:0.88:6:0.4
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
25.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
25.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
-aeval=val(0)|-val(1)
-
-
-
25.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in
for fade-in, or
-out
for a fade-out effect. Default is in
.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
25.4.1 Examples# TOC
-
-
- Fade in first 15 seconds of audio:
-
-afade=t=in:ss=0:d=15
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
25.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
25.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
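-For example (parameter values chosen only to illustrate the syntax), an
-all-pass centred at 1 kHz with a Q of 0.7 would be:
-
-allpass=f=1000:width_type=q:width=0.7
-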
-
-
25.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
-On the other hand, if both inputs are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
All inputs must have the same sample rate, and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
25.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
25.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
25.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
25.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
This can be used together with ffmpeg
-shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
25.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
-apad=pad_len=1024
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
-apad=whole_len=10000
- Use ffmpeg
to pad the audio input with silence, so that the
-video stream will always be the shortest one and will be converted
-completely in the output file when using the shortest
-option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
-
-
-
25.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
A phaser filter creates series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
-
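-For example (values picked only for illustration), a faster triangular sweep
-could be requested with:
-
-aphaser=type=t:speed=2:decay=0.6
-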
-
25.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
25.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
-aresample=44100
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
-aresample=async=1000
-
-
-
25.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel .
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
25.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
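-For example (assuming a 44.1 kHz input), declaring a higher nominal rate speeds
-playback up and raises the pitch:
-
-asetrate=r=48000
-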
-
-
-
25.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-position of the frame in the input stream, -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic audio)
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
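-A typical way to inspect the per-frame log without keeping any output (the file
-name is a placeholder) is:
-
-ffmpeg -i input.wav -af ashowinfo -f null -
-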
-
25.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05
(50 milliseconds). Allowed range is [0.1 - 10]
.
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
-
-
25.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2
, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
25.17.1 Examples# TOC
-
-
Stress-test amerge
by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
25.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
25.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
25.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
-atempo=0.8
- To speed up audio to 125% tempo:
-
-atempo=1.25
-
-
-
25.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
25.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000
.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
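-For example (an illustrative setting, not from the original manual text), a
-band-pass around 1 kHz with a Q of 1 would be:
-
-bandpass=f=1000:width_type=q:width=1
-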
-
-
25.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000
.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
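-For example (values chosen for illustration), mains hum around 50 Hz could be
-rejected with:
-
-bandreject=f=50:width_type=q:width=2
-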
-
-
25.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100
Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
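-For example (an arbitrary illustrative setting), a gentle 6 dB boost below
-about 110 Hz could be requested with:
-
-bass=g=6:f=110
-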
-
-
25.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
-
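-A sketch of the option syntax only (the coefficient values below are made up
-and do not correspond to any particular frequency response):
-
-biquad=b0=0.4:b1=0.2:b2=0.4:a0=1:a1=-0.6:a2=0.2
-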
-
25.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
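-For example, one of the pre-defined crossfeed levels listed above can be
-selected with:
-
-bs2b=profile=cmoy
-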
-
-
-
25.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel -out_channel
or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
25.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
25.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|....
or
-x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0
is assumed but
-may be overridden (by 0/out-dBn
). Typical values for the transfer
-function are -70/-70|-60/-20
.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
25.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
25.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
25.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
25.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
25.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added sweep delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
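-For example (parameter values picked only for illustration), a slow, deep
-flange could be obtained with:
-
-flanger=delay=10:depth=5:speed=0.3
-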
-
-
-
25.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
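-For example (an illustrative cutoff), low-frequency rumble could be attenuated
-with:
-
-highpass=f=200
-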
-
-
-
25.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
25.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa
.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH
is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH
, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
-/usr/lib/ladspa/ .
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help
, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate, defaults to 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
25.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
-ladspa=file=amp
-
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF
library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
-plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome
from the
-C* Audio Plugin Suite
(CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
25.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and prior one is kept.
-
-
-
-
-
25.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
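-
-For example, a single-pole low-pass at 3 kHz (illustrative values, using
-the frequency and poles options described above) could be requested with:
-
-
lowpass=f=3000:p=1
-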
-
-
25.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
25.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
-Note that ffmpeg integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
25.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- only one input is used per output channel,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
pan="stereo| c0=0*c0 | c1=c1"
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
25.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At end of filtering it displays track_gain and track_peak.
-
-
-
25.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
25.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-or equal to a noise tolerance value for a duration greater or equal to the
-minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
25.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg
to detect silence with 0.0001 noise
-tolerance in silence.mp3 :
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
25.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at the beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from the beginning of audio
-the start_periods will be 1 but it can be increased to higher
-values to trim all audio up to a specific count of non-silence periods.
-Default value is 0.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noise can be treated
-as silence and trimmed off. Default value is 0.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0 may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-For example, if you want to remove long pauses between words but do not want
-to remove the pauses completely. Default value is 0.
-
-
-
-
-
-
25.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
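-
-The same trim can be written with the named options documented above,
-assuming the positional order start_periods:start_duration:start_threshold
-used in the example:
-
-
silenceremove=start_periods=1:start_duration=5:start_threshold=0.02
-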
-
-
25.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep is the filter’s shelf transition.
-
-
-
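-
-For example, a modest treble boost of 3 dB using the gain option described
-above (an illustrative value) could be obtained with:
-
-
treble=g=3
-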
-
-
25.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
25.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
25.42.2 Examples# TOC
-
-
-
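-
- Halve the input audio volume:
-
-
volume=0.5
-
-
- Increase input audio power by 6 decibels using fixed-point precision
-(an illustrative use of the volume and precision options described above):
-
-
volume=volume=6dB:precision=fixed
-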
-
-
25.43 volumedetect# TOC
-
-
-Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the input stream end is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
25.43.1 Examples# TOC
-
-
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
26 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
26.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
26.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
26.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
26.2.1 Examples# TOC
-
-
- Generate silence:
-
-
aevalsrc=0
-
- Generate a sin signal with frequency of 440 Hz, set sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
-
-
-
-
26.3 anullsrc# TOC
-
-
-The null audio source; it returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
26.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
-
anullsrc=r=48000:cl=4
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
-
-
All the parameters need to be explicitly defined.
-
-
-
26.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal. See also the list_voices option.
-
-
-
-
-
26.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite and
-the lavfi device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
26.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
26.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
-
sine
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
27 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
27.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter for initialization.
-
-
27.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
28 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
28.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
28.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
28.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto
.
-
-
-
-
-
28.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16
.
-
-
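-
-For example, to compute the bounding box of pixels brighter than an
-illustrative luminance value of 100:
-
-
bbox=min_val=100
-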
-
-
-
28.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contains the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
28.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98
.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32
.
-
-
-
-
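-
-For example, the defaults written out explicitly (98% of pixels below a
-threshold of 32) can be requested with:
-
-
blackframe=amount=98:threshold=32
-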
-
-
28.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
-The blend filter takes two input streams and outputs one
-stream, the first input is the "top" layer and the second input is
-the "bottom" layer. Output terminates when the shortest input terminates.
-
-
-The tblend (time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode. Default value is normal.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
28.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
28.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2 for the
-luma and alpha planes, and of min(cw,ch)/2 for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
28.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
-
boxblur=2:1:cr=0:ar=0
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
28.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
28.9.1 Examples# TOC
-
-
- Visualizes multi-directionals MVs from P and B-Frames using ffplay
:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
28.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
28.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
-
colorbalance=rs=.3
-
-
-
28.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
28.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
28.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
red=red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
28.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
28.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
28.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
28.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1, it will force the output display aspect ratio
-to be the same as that of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
28.15.1 Examples# TOC
-
-
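-
- Crop an area of size 100x100 at position (12,34), illustrative values
-for the parameters described above:
-
-
crop=100:100:12:34
-
-
- Crop the central input area with size 100x100 (x and y keep their
-centered defaults):
-
-
crop=100:100
-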
-
-
-
28.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
-
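-
-For example, an illustrative scan pass that only reports the detected crop
-and discards the output (input.mkv is a placeholder name) could look like:
-
-
ffmpeg -i input.mkv -vf cropdetect=limit=24:round=16 -f null -
-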
-
-
28.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-the vector spaces, the values will be clipped accordingly.
-
-
-If there is no key point defined in x=0, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
-To avoid some filtergraph syntax conflicts, each key points list needs to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ....
-
-
-
28.17.1 Examples# TOC
-
-
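-
- Apply the vintage preset:
-
-
curves=preset=vintage
-
-
- Slightly increase the middle level of the red component, with key points
-written in the x0/y0 syntax described above (illustrative values):
-
-
curves=r='0/0.11 .42/.51 1/0.95'
-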
-
-
-
28.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set number overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing of the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n defines the
-blocksize, which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on the processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
28.18.1 Examples# TOC
-
-
-Apply a denoise with a sigma of 4.5:
-
-
dctdnoiz=4.5
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
-Violent denoise using a block size of 16x16:
-
-
dctdnoiz=15:n=4
-
-
28.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
-
-
-
28.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
Judder can be introduced, for instance, by pullup filter. If the original
-source was partially telecined content then the output of pullup,dejudder
-will have a variable frame rate. May change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
-
-
-
28.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
28.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
28.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area where to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
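-
-For example, allowing motion of up to 32 pixels in each direction and
-filling uncovered areas with the original image (illustrative values for
-the rx, ry and edge options described above):
-
-
deshake=rx=32:ry=32:edge=original
-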
-
-
-
28.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
28.23.1 Examples# TOC
-
-
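-
- Draw a semi-transparent red box of size 200x60 at position (10,20), with a
-border 5 pixels thick (illustrative values):
-
-
drawbox=x=10:y=20:w=200:h=60:color=red@0.5:t=5
-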
-
-
-
28.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness
, so image gets
-framed. Default to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
The parameters for x , y , w and h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
28.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
28.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
28.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer
-each other, so you can for example specify y=x/dar
.
-
-
-
-
-
28.25.2 Text expansion# TOC
-
-
If expansion is set to strftime
,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none
, the text is printed verbatim.
-
-
If expansion is set to normal
(which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
Sequences of the form %{...} are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
28.25.3 Examples# TOC
-
-
-
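-
- As an illustrative sketch (the font path is a placeholder), the expansion
-functions described above can be combined to burn the frame number and a
-formatted timestamp into the video:
-
-
drawtext=fontfile=/path/to/font.ttf:text='%{n} %{pts\:hms}':x=10:y=10
-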
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
28.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high .
-
-Default value for low is 20/255 , and default value for high
-is 50/255 .
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
28.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
28.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select the r , g , b planes
-together with the y , u , v planes at the same time.
-
-
-
-
-
28.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel component from input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
28.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed, must be an integer included between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
-
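-
- For illustration, a strong posterize pass limited to 16 distinct output
-colors, with a few extra refinement steps, could be requested with:
-
-
elbg=l=16:n=3
-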
-
-
28.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in
.
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
28.29.1 Examples# TOC
-
-
-
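-
- As a rough sketch (timings are arbitrary), a one second fade-in from black at
-the start of the stream followed by a two second fade-out could be written as:
-
-
fade=t=in:st=0:d=1, fade=t=out:st=25:d=2:c=black
-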
-
-
28.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0
or
-top
) or the bottom field (if the value is 1
or
-bottom
).
-
-
-
-
-
28.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch
needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch
will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch
can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on
-which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch
and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parentheses at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0
could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1
.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 defaults to 0
.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0]
.
-
-Default value is 12.0
.
-
-
-combmatch
-When combmatch is not none , fieldmatch will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1
(every pixel will be detected as combed) to 255
(no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12]
.
-
-Default value is 9
.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma-only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0
and maximum is blocky x blockx
(at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80
.
-
-
-
-
-
28.31.1 p/c/n/u/b meaning# TOC
-
-
-
28.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch
is configured to run a matching from bottom
-(field =bottom ) this is how this input stream get transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
28.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, Bottom:2). According to the match, an
-’x’ is placed above and below each matched field.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
28.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
28.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
28.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
28.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
28.34.1 Examples# TOC
-
-
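-
- For illustration, asking for yuv420p while allowing libavfilter to fall back
-to yuv444p could be written as:
-
-
format=pix_fmts=yuv420p|yuv444p
-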
-
-
-
28.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
28.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
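-
fps=fps=25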
-
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
28.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
28.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select frame after every step frames.
-Allowed values are positive integers higher than 0. Default value is 1 .
-
-
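-
- For illustration, keeping only one frame out of every five could be requested
-with:
-
-
framestep=step=5
-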
-
-
-
28.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH .
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive) or by a color description specified in the "Color"
-section in the ffmpeg-utils manual), a position (specified as X /Y , where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
28.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
28.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0
− PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames have often larger QP. Default is
-0
(not enabled).
-
-
-
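-
- As a rough sketch, running the filter at its highest quality level while
-forcing a moderate quantization parameter could look like:
-
-
fspp=quality=5:qp=20
-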
-
-
-
28.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified it will evaluate to the opaque value.
-If none of the chrominance expressions are specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the number of pixels in the luma plane and the number in the
-current plane. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
28.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
28.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
28.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and radius of 8:
-
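-
gradfun=3.5:8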
-
- Specify radius, omitting the strength (which will fall-back to the default
-value):
-
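-
-(for instance, with a radius of 8:)
-
gradfun=radius=8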
-
-
-
-
-
28.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0
.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0
disable the filter after the last frame of the CLUT is reached.
-Default is 1
.
-
-
-
-
haldclut
also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
28.42.1 Workflow examples# TOC
-
-
-
28.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut
to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (duration of
-clut.nut ), then the last picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot
stream.
-
-
-
28.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a squared image of Level*Level*Level
by
-Level*Level*Level
pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut
filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed at the top right and, below them, the same color bars
-processed by the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
28.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
28.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none
, weak
or
-strong
. It defaults to none
.
-
-
-
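-
- For illustration, a gentle equalization with weak antibanding could be
-requested with:
-
-
histeq=strength=0.1:antibanding=weak
-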
-
-
28.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar as color
but actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform
mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10
. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform
. Can be either row
, or column
.
-Default is row
.
-
-
-waveform_mirror
-Set mirroring mode for waveform
. 0
means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row
mode and at the top for column
mode. Default is
-0
(unmirrored).
-
-
-display_mode
-Set display mode for waveform
and levels
.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels
. Can be either linear
, or logarithmic
.
-Default is linear
.
-
-
-
-
-
28.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
28.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
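-
- As a rough sketch, a stronger-than-default denoise (the remaining strengths
-are derived from luma_spatial as described above) could be written as:
-
-
hqdn3d=luma_spatial=8
-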
-
-
28.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2
for hq2x
, 3
for
-hq3x
and 4
for hq4x
.
-Default is 3
.
-
-
-
-
-
28.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
28.48.1 Examples# TOC
-
-
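-
- For illustration, rotating the hue by 90 degrees while slightly boosting the
-saturation could be written as:
-
-
hue=h=90:s=1.5
-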
-
-
-
28.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
28.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect whether the input frames are interlaced, progressive,
-top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-if the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
-
-
-
28.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced image fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none
.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
-
-
-
-
-
28.51 interlace# TOC
-
-
Simple interlacing filter from progressive contents. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
-
-
28.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. Work on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
28.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
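-
kerndeint=sharp=1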
-
- Paint processed pixels in white:
-
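-
kerndeint=map=1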
-
-
-
-
28.53 lenscorrection# TOC
-
-
Correct radial lens distortion
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
28.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
-
-
28.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
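-
- As a rough sketch (the LUT file name is a placeholder), applying a cube file
-with trilinear interpolation could look like:
-
-
lut3d=file=mylut.cube:interp=trilinear
-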
-
-
-
28.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
28.55.1 Examples# TOC
-
-
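-
- For illustration, negating the luminance while leaving the chroma components
-at their default expressions could be written as:
-
-
lutyuv=y=negval
-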
-
-
-
28.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merges selected input
-planes into the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0
.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mapping is
-similar, ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p
.
-
-
-
-
-
28.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
28.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
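-
- As a rough sketch, feeding mcdeint one field per frame through yadif, as
-required above, could look like:
-
-
yadif=mode=1,mcdeint=mode=fast
-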
-
-
28.58 mp# TOC
-
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-of the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
28.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
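-
-(the eq2 parameter values below are arbitrary, for illustration:)
-
mp=eq2=1.0:2:0.5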
-
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
28.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previous sequentially dropped frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
-
-
-
28.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer in input; if non-zero it negates the
-alpha component (if available). The default value in input is 0.
-
-
-
28.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
28.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
28.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457
.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for specific pixel component or all pixel components in case
-all_strength . Default value is 0
. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags or set flags for all components if all_flags .
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
28.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
28.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
28.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
28.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
It corresponds to the libopencv function cvDilate.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored; the number
-of columns and rows of the read file is used instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
28.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
It corresponds to the libopencv function cvErode.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
28.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth.
-
-
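A minimal illustrative sketch (parameter values hypothetical) applying a 7x7
-gaussian smooth:
-
ocv=filter_name=smooth:filter_params=gaussian|7|7
-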
-
28.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them begin in the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together more overlays but you should test the
-efficiency of such approach.
-
-
-
28.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
28.65.2 Examples# TOC
-
-
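An illustrative sketch (file names hypothetical) that overlays a second video
-in the bottom-right corner of the main input with a 10 pixel margin:
-
ffmpeg -i input.mp4 -i logo.png -filter_complex "overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10" output.mp4
-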
-
-
-
28.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0.
-
-
-
-
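An illustrative sketch (values hypothetical) using a deeper wavelet
-decomposition and stronger denoising than the defaults:
-
owdenoise=depth=10:luma_strength=25:chroma_strength=15
-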
-
28.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
28.67.1 Examples# TOC
-
-
-
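An illustrative sketch (sizes hypothetical) that pads the input to 640x480 and
-centers it on a black background:
-
pad=w=640:h=480:x=(ow-iw)/2:y=(oh-ih)/2:color=black
-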
-
-
28.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged.
-If the sense option is set to source, then the specified points will be sent
-to the corners of the destination. If the sense option is set to destination,
-then the corners of the source will be sent to the specified coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
-
-
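An illustrative sketch (coordinates hypothetical) that maps two slightly inset
-top corners of the source to the top corners of the destination:
-
perspective=x0=30:y0=0:x1=W-30:y1=0:x2=0:y2=H:x3=W:y3=H:sense=source
-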
-
28.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
28.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
28.71 pp# TOC
-
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255
.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1)
filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1)
filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1)
filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a
)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a
)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
-
-
-
-
-
28.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
pp=hb/vb/dr/al
-
- Apply default filters without brightness/contrast correction:
-
pp=de/-al
-
- Apply default filters and temporal denoiser:
-
-
pp=default/tmpnoise|1|2|3
-
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
pp=hb|y/vb|a
-
-
-
-
-
28.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp = 6 with 7 point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
-
-
-
28.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes two input videos; the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
If stats_file is selected, the printed file contains a sequence of
-key/value pairs of the form key :value for each compared
-pair of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
28.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup: use fps=24000/1001 if the input frame rate is 29.97fps, and
-fps=24 for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use chroma plane instead of the default luma plane
-for doing filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
28.75 qp# TOC
-
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
28.75.1 Examples# TOC
-
-
- Some equation like:
-
qp=2+2*sin(PI*qp)
-
-
-
-
28.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
-
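A minimal illustrative invocation, assuming a prepared filter bitmap named
-logo_mask.png (hypothetical):
-
removelogo=filename=logo_mask.png
-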
-
28.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
28.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
rotate=PI/6
-
- Rotate the input by PI/6 radians counter-clockwise:
-
rotate=-PI/6
-
- Rotate the input by 45 degrees clockwise:
-
rotate=45*PI/180
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
rotate=PI/3+2*PI/T*t
-
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
28.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
28.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
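An illustrative sketch (values hypothetical) that blurs luma noticeably while
-the chroma options fall back to the derived defaults:
-
sab=luma_radius=2.5:luma_strength=10
-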
-
28.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
28.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used.
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden as well as allows forcing
-a specific value used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is a different thing than specifying -1 for w
-or h , you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar.
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
28.79.2 Examples# TOC
-
-
-
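An illustrative sketch that halves the width and derives the height from the
-aspect ratio via the -1 convention described above:
-
scale=w=iw/2:h=-1
-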
-
-
28.80 separatefields# TOC
-
-
The separatefields filter takes a frame-based video input and splits
-each frame into its component fields, producing a new half height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses field-dominance information in the frame to decide which
-of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields filter.
-
-
-
28.81 setdar, setsar# TOC
-
-
The setdar
filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar
filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar only), sar (setsar only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, it is assumed the value "0".
-In case the form "num :den " is used, the :
character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
28.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-setdar=dar=16:9
-
-
- To change the sample aspect ratio to 10:11, specify:
-
setsar=sar=10/11
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
28.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder or yadif).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
-
-
28.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType enum and of
-the av_get_picture_type_char function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
-
-
28.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
28.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specifies an additional form of image analysis.
-out outputs the video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outliers pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition . Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
28.85.1 Examples# TOC
-
-
-
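An illustrative sketch (file name hypothetical) that highlights pixels outside
-of legal broadcast range in red while the metadata values are logged:
-
ffmpeg -i input.mov -vf signalstats=out=brng:color=red -f null -
-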
-
-
28.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
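An illustrative sketch (values hypothetical) applying a mild blur restricted to
-flat areas of the luma plane:
-
smartblur=luma_radius=1.5:luma_strength=0.6:luma_threshold=20
-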
-
-
28.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filters accept the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
28.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
stereo3d=sbsl:aybd
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
stereo3d=abl:sbsr
-
-
-
-
-
28.88 spp# TOC
-
-
Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or, in the case of quality level 6, all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0, the filter will have no
-effect. A value of 6 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1. Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0 (not enabled).
-
-
-
-
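An illustrative sketch (values hypothetical) using the highest quality level
-with a forced quantization parameter:
-
spp=quality=6:qp=6
-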
-
28.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
subtitles=sub.srt
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
subtitles=video.mkv
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
28.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
-
-
28.91 swapuv# TOC
-
Swap U & V plane.
-
-
-
28.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-The default value is top.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
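An illustrative sketch applying the classic 2:3 pulldown for NTSC output:
-
telecine=first_field=top:pattern=23
-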
-
-
28.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100.
-
-
-
-
Since the filter keeps track of the whole frame sequence, a bigger n
-value will result in a higher memory usage, so a high value is not recommended.
-
-
-
28.93.1 Examples# TOC
-
-
- Extract one picture each 50 frames:
-
thumbnail=50
-
- Complete example of a thumbnail creation with ffmpeg:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
28.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w xh . The default value is 0, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
28.94.1 Examples# TOC
-
-
-
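An illustrative sketch (values hypothetical) laying out 5 frames on a 3x2 grid
-with 7 pixels of inner padding and a 2 pixel outer margin:
-
tile=layout=3x2:nb_frames=5:padding=7:margin=2
-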
-
-
28.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
-
-
-
28.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated; the passthrough option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the given value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none.
-
-
-
-
For example, to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
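transpose=1:portrait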
-
-
-
28.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
-
-
-
-
-
28.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number; reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, use OpenCL capabilities; only available if
-FFmpeg was configured with --enable-opencl. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
28.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
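unsharp=7:7:-2:7:7:-2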
-
-
-
-
28.99 uspp# TOC
-
-
Apply an ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or, in the case of quality level 8, all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0, the filter will have no
-effect. A value of 8 means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
-
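-For example, a possible invocation at the highest quality level with a forced
-quantizer (both values chosen only for illustration):
-
uspp=quality=8:qp=5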
-
28.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
28.100.1 Examples# TOC
-
-
- Use default values:
-
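vidstabdetect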
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
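vidstabdetect=show=1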
-
- Analyze a video with medium shakiness using ffmpeg:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
28.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab.
-
-
-
28.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smoothen the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0. Default value is 0.
-
-Use also the tripod option of vidstabdetect.
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
28.101.2 Examples# TOC
-
-
-
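- A possible second pass with ffmpeg, applying the transforms with default
-values and adding unsharp as recommended above (file names are placeholders):
-
ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg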
-
-
28.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
28.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2] range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2" and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1.
-
-
-
-
-
28.103.1 Expressions# TOC
-
-
The alpha , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
28.103.2 Examples# TOC
-
-
- Apply simple strong vignetting effect:
-
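vignette=PI/4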
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
28.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so-called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following values:
-
-
-‘all ’
-Deinterlace all frames,
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
-
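-For example, a possible invocation deinterlacing only frames marked as
-interlaced, using the simple coefficient set:
-
w3fdif=filter=simple:deint=interlaced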
-
-
28.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for 2xBR, 3 for
-3xBR and 4 for 4xBR.
-Default is 3.
-
-
-
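-For example, a possible invocation scaling pixel art by a factor of 4:
-
xbr=n=4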
-
-
28.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all.
-
-
-
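-For example, a possible ffmpeg invocation deinterlacing only frames marked as
-interlaced, outputting one frame per field (file names are placeholders):
-
ffmpeg -i interlaced.mp4 -vf yadif=mode=send_field:deint=interlaced progressive.mp4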
-
-
28.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets for how many frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of last output frame of previous input frame or 0 when there was
-not yet such frame (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-number of output frames created for previous input frame
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
28.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
29 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
29.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
29.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename and pattern options. If these options are
-not specified, an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
29.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
29.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration cound mode.
-
-normalized_iteration_count
-set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
-
-
29.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
-
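mptestsrc=t=dc_luma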
-
will generate a "dc_luma" test pattern.
-
-
-
29.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name of the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
29.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS /BNB ",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell to become alive (i.e. to "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bit integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, the low order bits specify
-the rule for "borning" new cells. Higher order bits encode for a
-higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9 specifies a stay alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 neighbor alive
-cells, and will born a new cell if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
29.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
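life=rule=S14/B34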
-
- Full example with slow death effect (mold) using ffplay
:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
29.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
-The color source provides a uniformly colored input.
-
-
-The haldclutsrc source provides an identity Hald CLUT. See also the
-haldclut filter.
-
-
-The nullsrc source returns unprocessed video frames. It is
-mainly useful to be employed in analysis / debugging tools, or as the
-source for filters which ignore the input data.
-
-
-The rgbtestsrc source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
-The smptebars source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
-The smptehdbars source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
-The testsrc source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as identity matrix for 3D lookup tables. Each component is
-coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc can be used. The
-following command generates noise in the luminance plane by employing
-the geq filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
-
-
29.7.1 Commands# TOC
-
-
-The color source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
-
-
-
30 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
30.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter for initialization.
-
-
-
30.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
31 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
31.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
-The filter is used to measure the difference between channels of a stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight but horizontal line appears, this indicates that the left and
-right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400.
-
-
-rate, r
-Set the output frame rate. Default value is 25.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40, 160 and 80.
-Allowed range is [0, 255].
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15, 10 and 5.
-Allowed range is [0, 255].
-
-
-zoom
-Set the zoom factor. Default value is 1. Allowed range is [1, 10].
-
-
-
-
-
31.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
-
-
-
-
31.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
31.2.1 Examples# TOC
-
-
-
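- A possible ffmpeg invocation concatenating two files, each with one video
-and one audio stream (file names are placeholders):
-
ffmpeg -i part1.mp4 -i part2.mp4 -filter_complex \
  "[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [v][a]" \
  -map "[v]" -map "[a]" output.mp4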
-
-
31.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M), Short-term loudness (S),
-Integrated loudness (I) and Loudness Range (LRA).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is 0.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480.
-
-
-meter
-Set the EBU scale meter. Default is 9. Common values are 9 and
-18, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value in this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
-
-Default is 0.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the highest sample value. It logs a message
-for sample-peak (identified by SPK).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak
-(identified by TPK) and true-peak per frame (identified by FTPK).
-This mode requires a build with libswresample.
-
-
-
-
-
-
-
-
31.3.1 Examples# TOC
-
-
- Real-time graph using ffplay, with an EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
-
-
-
-
31.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave works with video inputs, ainterleave with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-
In order to submit one frame to output, these filters need to enqueue
-at least one frame for each input, so they cannot work in case one
-input is not yet terminated and will not receive incoming frames.
-
-
For example, consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to output until that input sends an end-of-stream signal.
-
-
Also, depending on inputs synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
31.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
31.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
These filters are mainly aimed at developers to test direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode; it must be an integer between
-0 and UINT32_MAX. If not specified, or if explicitly set to
--1, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
-
-
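-For example, a possible invocation marking all output frames read-only to check
-that a following filter does not rely on in-place writes:
-
perms=mode=ro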
-
31.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1, assuming that the input index starts from 0.
-
-For example a value of 1.2 corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
31.6.1 Examples# TOC
-
-
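- Keep only I-frames (a typical use of the pict_type constant):
-
-
select='eq(pict_type\,I)'
-
-
- Keep only one frame every 100:
-
-
select='not(mod(n\,100))'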
-
-
-
31.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
-sendcmd must be inserted between two video filters,
-asendcmd must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
31.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
An interval is specified by the following syntax:
-
-
-
The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater or equal to START and is lesser than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter] is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespace, or
-sequences of characters starting with # until the end of line,
-are ignored and can be used to annotate comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
31.7.2 Examples# TOC
-
-
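- A possible example changing the tempo of a following atempo filter to 1.5
-when the stream reaches second 4:
-
-
asendcmd=c='4.0 atempo tempo 1.5',atempo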
-
-
-
31.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts works on video frames, asetpts on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
31.8.1 Examples# TOC
-
-
- Start counting PTS from zero
-
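setpts=PTS-STARTPTS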
-
- Apply fast motion effect:
-
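setpts=0.5*PTS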
-
- Apply slow motion effect:
-
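setpts=2.0*PTS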
-
- Set fixed rate of 25 frames per second:
-
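setpts=N/(25*TB)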
-
- Set fixed rate 25 fps with some jitter:
-
-
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
-
- Apply an offset of 10 seconds to the input PTS:
-
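setpts=PTS+10/TB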
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
-
- Generate timestamps by counting samples:
-
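asetpts=N/SR/TB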
-
-
-
-
-
31.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
31.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
-
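settb=expr=1/25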
-
- Set the timebase to 1/10:
-
-
- Set the timebase to 1001/1000:
-
-
- Set the timebase to 2*intb:
-
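settb=2*intb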
-
- Set the default timebase value:
-
-
-
-
-
31.10 showcqt# TOC
-
Convert input audio to a video output representing
-frequency spectrum logarithmically (using constant Q transform with
-Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc).
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies, there is a trade-off between
-accuracy in the time domain and the frequency domain. If timeclamp is lower,
-events in the time domain are represented more accurately (such as a fast bass drum);
-otherwise events in the frequency domain are represented more accurately
-(such as a bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, the transform is
-more accurate; otherwise the transform is faster. Acceptable value is [0.1, 10.0].
-Default value is 1.0.
-
-
-gamma
-Specify gamma. Lower gamma makes the spectrum have more contrast, higher gamma
-makes the spectrum have more range. Acceptable value is [1.0, 7.0].
-Default value is 3.0.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify font color expression. This is an arithmetic expression that should return
-an integer value 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25.
-
-
-count
-Specify the number of transforms per frame, so there are fps*count transforms
-per second. Note that audio data rate must be divisible by fps*count.
-Default value is 6.
-
-
-
-
-
-
31.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
31.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set saturation modifier for displayed colors. Negative values provide an
-alternative color scheme. A value of 0 means no saturation at all.
-Saturation must be in the [-10.0, 10.0] range.
-Default value is 1.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
31.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
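-
- A spectrum using a Blackman window with boosted color saturation, as an
-illustrative combination of the win_func and saturation options described
-above (input.mp3 is a placeholder file name):
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1]; [a] showspectrum=win_func=blackman:saturation=2 [out0]'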
-
-
-
-
31.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the sample waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
31.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
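-
- Draw each channel in its own row using the line mode and the split_channels
-option described above; an illustrative command assuming a stereo file
-stereo.wav:
-
-
ffplay -f lavfi 'amovie=stereo.wav, asplit [a][out1]; [a] showwaves=mode=line:split_channels=1 [out0]'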
-
-
-
-
31.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit works with audio input, split with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
31.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
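-
- Split a video and process each copy differently, for example keeping one
-full-size output and one downscaled output; an illustrative sketch with
-placeholder file names:
-
-
ffmpeg -i INPUT -filter_complex "split [full][small]; [small] scale=320:-1 [thumb]" -map "[full]" full.mp4 -map "[thumb]" thumb.mp4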
-
-
-
-
31.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq and azmq work as pass-through filters. zmq
-must be inserted between two video filters, azmq between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq and azmq filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
The received message must be in the form:
-
-
TARGET COMMAND [ARG]
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
31.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
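-
-
Since TARGET may also be the filter class name, a single command can address
-every instance of that class; for example, to turn both color sources in the
-graph above green (an illustrative command):
-
-
echo color c green | tools/zmqsend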
-
-
-
-
32 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
32.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
32.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod, so the numerical value may be suffixed by an SI
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non-monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
32.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
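-
- Read the default (best suited) video and audio streams using the special
-"dv" and "da" names described above; an illustrative variant with a
-placeholder file name:
-
-
movie=input.mkv:s=dv+da [video] [audio]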
-
-
-
-
-
33 See Also# TOC
-
-
ffplay ,
-ffmpeg , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
34 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffplay.html b/Externals/ffmpeg/shared/doc/ffplay.html
deleted file mode 100644
index e072758fa8..0000000000
--- a/Externals/ffmpeg/shared/doc/ffplay.html
+++ /dev/null
@@ -1,745 +0,0 @@
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
- ffplay Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffplay [options ] [input_file ]
-
-
-
2 Description# TOC
-
-
FFplay is a very simple and portable media player using the FFmpeg
-libraries and the SDL library. It is mostly used as a testbed for the
-various FFmpeg APIs.
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
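-
As an illustrative sketch of how these specifiers are used with ffplay, the
--ast and -vst options described under the advanced options below accept them,
-assuming input.mkv contains at least two audio streams:
-
-
ffplay -vst v:0 -ast a:1 input.mkv
-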
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assertion failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--x width
-Force displayed width.
-
--y height
-Force displayed height.
-
--s size
-Set frame size (WxH or abbreviation), needed for videos which do
-not contain a header with the frame size like raw YUV. This option
-has been deprecated in favor of private options, try -video_size.
-
--fs
-Start in fullscreen mode.
-
--an
-Disable audio.
-
--vn
-Disable video.
-
--sn
-Disable subtitles.
-
--ss pos
-Seek to a given position in seconds.
-
--t duration
-play <duration> seconds of audio/video
-
--bytes
-Seek by bytes.
-
--nodisp
-Disable graphical display.
-
--f fmt
-Force format.
-
--window_title title
-Set window title (default is the input filename).
-
--loop number
-Loops movie playback <number> times. 0 means forever.
-
--showmode mode
-Set the show mode to use.
-Available values for mode are:
-
-‘0, video ’
-show video
-
-‘1, waves ’
-show audio waves
-
-‘2, rdft ’
-show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
-
-
-
-Default value is "video", if video is not present or cannot be played
-"rdft" is automatically selected.
-
-You can interactively cycle through the available show modes by
-pressing the key w .
-
-
--vf filtergraph
-Create the filtergraph specified by filtergraph and use it to
-filter the video stream.
-
-filtergraph is a description of the filtergraph to apply to
-the stream, and must have a single video input and a single video
-output. In the filtergraph, the input is associated to the label
-in
, and the output to the label out
. See the
-ffmpeg-filters manual for more information about the filtergraph
-syntax.
-
-You can specify this parameter multiple times and cycle through the specified
-filtergraphs along with the show modes by pressing the key w .
-
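-For example, to play a video scaled to a width of 640 pixels and horizontally
-flipped (an illustrative command; input.mp4 is a placeholder):
-
-
ffplay -vf "scale=640:-1,hflip" input.mp4
-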
-
--af filtergraph
-filtergraph is a description of the filtergraph to apply to
-the input audio.
-Use the option "-filters" to show all the available filters (including
-sources and sinks).
-
-
--i input_file
-Read input_file .
-
-
-
-
-
3.5 Advanced options# TOC
-
--pix_fmt format
-Set pixel format.
-This option has been deprecated in favor of private options, try -pixel_format.
-
-
--stats
-Print several playback statistics, in particular show the stream
-duration, the codec parameters, the current position in the stream and
-the audio/video synchronisation drift. It is on by default, to
-explicitly disable it you need to specify -nostats
.
-
-
--fast
-Non-spec-compliant optimizations.
-
--genpts
-Generate pts.
-
--sync type
-Set the master clock to audio (type=audio
), video
-(type=video
) or external (type=ext
). Default is audio. The
-master clock is used to control audio-video synchronization. Most media
-players use audio as master clock, but in some cases (streaming or high
-quality broadcast) it is necessary to change that. This option is mainly
-used for debugging purposes.
-
--ast audio_stream_specifier
-Select the desired audio stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" audio stream is selected in the program of the
-already selected video stream.
-
--vst video_stream_specifier
-Select the desired video stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" video stream is selected.
-
--sst subtitle_stream_specifier
-Select the desired subtitle stream using the given stream specifier. The stream
-specifiers are described in the Stream specifiers chapter. If this option
-is not specified, the "best" subtitle stream is selected in the program of the
-already selected video or audio stream.
-
--autoexit
-Exit when video is done playing.
-
--exitonkeydown
-Exit if any key is pressed.
-
--exitonmousedown
-Exit if any mouse button is pressed.
-
-
--codec:media_specifier codec_name
-Force a specific decoder implementation for the stream identified by
-media_specifier, which can assume the values a (audio),
-v (video), and s (subtitle).
-
-
--acodec codec_name
-Force a specific audio decoder.
-
-
--vcodec codec_name
-Force a specific video decoder.
-
-
--scodec codec_name
-Force a specific subtitle decoder.
-
-
--autorotate
-Automatically rotate the video according to presentation metadata. Enabled by
-default, use -noautorotate to disable it.
-
-
--framedrop
-Drop video frames if video is out of sync. Enabled by default if the master
-clock is not set to video. Use this option to enable frame dropping for all
-master clock sources, use -noframedrop to disable it.
-
-
--infbuf
-Do not limit the input buffer size, read as much data as possible from the
-input as soon as possible. Enabled by default for realtime streams, where data
-may be dropped if not read in time. Use this option to enable infinite buffers
-for all inputs, use -noinfbuf to disable it.
-
-
-
-
-
-
3.6 While playing# TOC
-
-
-q, ESC
-Quit.
-
-
-f
-Toggle full screen.
-
-
-p, SPC
-Pause.
-
-
-a
-Cycle audio channel in the current program.
-
-
-v
-Cycle video channel.
-
-
-t
-Cycle subtitle channel in the current program.
-
-
-c
-Cycle program.
-
-
-w
-Cycle video filters or show modes.
-
-
-s
-Step to the next frame.
-
-Pause if the stream is not already paused, step to the next video
-frame, and pause.
-
-
-left/right
-Seek backward/forward 10 seconds.
-
-
-down/up
-Seek backward/forward 1 minute.
-
-
-page down/page up
-Seek to the previous/next chapter,
-or, if there are no chapters,
-seek backward/forward 10 minutes.
-
-
-mouse click
-Seek to percentage in file corresponding to fraction of width.
-
-
-
-
-
-
-
-
4 See Also# TOC
-
-
ffmpeg-all ,
-ffmpeg , ffprobe , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
5 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffprobe-all.html b/Externals/ffmpeg/shared/doc/ffprobe-all.html
deleted file mode 100644
index a52af3304f..0000000000
--- a/Externals/ffmpeg/shared/doc/ffprobe-all.html
+++ /dev/null
@@ -1,21676 +0,0 @@
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffprobe [options ] [input_file ]
-
-
-
2 Description# TOC
-
-
ffprobe gathers information from multimedia streams and prints it in
-human- and machine-readable fashion.
-
-
For example it can be used to check the format of the container used
-by a multimedia stream and the format and type of each media stream
-contained in it.
-
-
If a filename is specified in input, ffprobe will try to open and
-probe the file content. If the file cannot be opened or recognized as
-a multimedia file, a positive exit code is returned.
-
-
ffprobe may be employed both as a standalone application or in
-combination with a textual filter, which may perform more
-sophisticated processing, e.g. statistical processing or plotting.
-
-
Options are used to list some of the formats supported by ffprobe or
-for specifying which information to display, and for setting how
-ffprobe will show it.
-
-
ffprobe output is designed to be easily parsable by a textual filter,
-and consists of one or more sections of a form defined by the selected
-writer, which is specified by the print_format option.
-
-
Sections may contain other nested sections, and are identified by a
-name (which may be shared by other sections), and an unique
-name. See the output of sections .
-
-
Metadata tags stored in the container or in the streams are recognized
-and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
-section.
-
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assertion failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--f format
-Force format to use.
-
-
--unit
-Show the unit of the displayed values.
-
-
--prefix
-Use SI prefixes for the displayed values.
-Unless the "-byte_binary_prefix" option is used all the prefixes
-are decimal.
-
-
--byte_binary_prefix
-Force the use of binary prefixes for byte values.
-
-
--sexagesimal
-Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
-
-
--pretty
-Prettify the format of the displayed values; it corresponds to the
-options "-unit -prefix -byte_binary_prefix -sexagesimal".
-
-
--of, -print_format writer_name [=writer_options ]
-Set the output printing format.
-
-writer_name specifies the name of the writer, and
-writer_options specifies the options to be passed to the writer.
-
-For example, to print the output in JSON format, specify:
-
-
-print_format json
-
-For more details on the available output printing formats, see the
-Writers section below.
-
-
--sections
-Print sections structure and section information, and exit. The output
-is not meant to be parsed by a machine.
-
-
--select_streams stream_specifier
-Select only the streams specified by stream_specifier . This
-option affects only the options related to streams
-(e.g. show_streams
, show_packets
, etc.).
-
-For example to show only audio streams, you can use the command:
-
-
ffprobe -show_streams -select_streams a INPUT
-
-
-To show only video packets belonging to the video stream with index 1:
-
-
ffprobe -show_packets -select_streams v:1 INPUT
-
-
-
--show_data
-Show payload data, as a hexadecimal and ASCII dump. Coupled with
--show_packets , it will dump the packets’ data. Coupled with
--show_streams , it will dump the codec extradata.
-
-The dump is printed as the "data" field. It may contain newlines.
-
-
--show_data_hash algorithm
-Show a hash of payload data, for packets with -show_packets and for
-codec extradata with -show_streams .
-
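-For example, to print an MD5 hash of each packet payload (an illustrative
-command assuming MD5 is among the accepted algorithm names; input.mp4 is a
-placeholder):
-
-
ffprobe -show_packets -show_data_hash MD5 input.mp4
-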
-
--show_error
-Show information about the error found when trying to probe the input.
-
-The error information is printed within a section with name "ERROR".
-
-
--show_format
-Show information about the container format of the input multimedia
-stream.
-
-All the container format information is printed within a section with
-name "FORMAT".
-
-
--show_format_entry name
-Like -show_format , but only prints the specified entry of the
-container format information, rather than all. This option may be given more
-than once, then all specified entries will be shown.
-
-This option is deprecated, use show_entries
instead.
-
-
--show_entries section_entries
-Set list of entries to show.
-
-Entries are specified according to the following
-syntax. section_entries contains a list of section entries
-separated by :
. Each section entry is composed by a section
-name (or unique name), optionally followed by a list of entries local
-to that section, separated by ,
.
-
-If section name is specified but is followed by no =
, all
-entries are printed to output, together with all the contained
-sections. Otherwise only the entries specified in the local section
-entries list are printed. In particular, if =
is specified but
-the list of local entries is empty, then no entries will be shown for
-that section.
-
-Note that the order of specification of the local section entries is
-not honored in the output, and the usual display order will be
-retained.
-
-The formal syntax is given by:
-
-
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME [,LOCAL_SECTION_ENTRIES ]
-SECTION_ENTRY ::= SECTION_NAME [=[LOCAL_SECTION_ENTRIES ]]
-SECTION_ENTRIES ::= SECTION_ENTRY [:SECTION_ENTRIES ]
-
-
-For example, to show only the index and type of each stream, and the PTS
-time, duration time, and stream index of the packets, you can specify
-the argument:
-
-
packet=pts_time,duration_time,stream_index : stream=index,codec_type
-
-
-To show all the entries in the section "format", but only the codec
-type in the section "stream", specify the argument:
-
-
format : stream=codec_type
-
-
-To show all the tags in the stream and format sections:
-
-
stream_tags : format_tags
-
-
-To show only the title tag (if available) in the stream
-sections:
-
-
stream_tags=title
-
-
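-As an illustrative complete command line using one of the selections above
-(input.mp4 is a placeholder):
-
-
ffprobe -show_entries format=duration,size:stream=codec_type input.mp4
-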
--show_packets
-Show information about each packet contained in the input multimedia
-stream.
-
-The information for each single packet is printed within a dedicated
-section with name "PACKET".
-
-
--show_frames
-Show information about each frame and subtitle contained in the input
-multimedia stream.
-
-The information for each single frame is printed within a dedicated
-section with name "FRAME" or "SUBTITLE".
-
-
--show_streams
-Show information about each media stream contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "STREAM".
-
-
--show_programs
-Show information about programs and their streams contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "PROGRAM_STREAM".
-
-
--show_chapters
-Show information about chapters stored in the format.
-
-Each chapter is printed within a dedicated section with name "CHAPTER".
-
-
--count_frames
-Count the number of frames per stream and report it in the
-corresponding stream section.
-
-
--count_packets
-Count the number of packets per stream and report it in the
-corresponding stream section.
-
-
--read_intervals read_intervals
-
-Read only the specified intervals. read_intervals must be a
-sequence of interval specifications separated by ",".
-ffprobe
will seek to the interval starting point, and will
-continue reading from that.
-
-Each interval is specified by two optional parts, separated by "%".
-
-The first part specifies the interval start position. It is
-interpreted as an absolute position, or as a relative offset from the
-current position if it is preceded by the "+" character. If this first
-part is not specified, no seeking will be performed when reading this
-interval.
-
-The second part specifies the interval end position. It is interpreted
-as an absolute position, or as a relative offset from the current
-position if it is preceded by the "+" character. If the offset
-specification starts with "#", it is interpreted as the number of
-packets to read (not including the flushing packets) from the interval
-start. If no second part is specified, the program will read until the
-end of the input.
-
-Note that seeking is not accurate, thus the actual interval start
-point may be different from the specified position. Also, when an
-interval duration is specified, the absolute end time will be computed
-by adding the duration to the interval start point found by seeking
-the file, rather than to the specified start value.
-
-The formal syntax is given by:
-
-
INTERVAL ::= [START |+START_OFFSET ][%[END |+END_OFFSET ]]
-INTERVALS ::= INTERVAL [,INTERVALS ]
-
-
-A few examples follow.
-
- Seek to time 10, read packets until 20 seconds after the found seek
-point, then seek to position 01:30 (1 minute and thirty
-seconds) and read packets until position 01:45:
-
-
10%+20,01:30%01:45
-
-
- Read only 42 packets after seeking to position 01:23:
-
-
01:23%+#42
-
-
- Read only the first 20 seconds from the start:
-
-
%+20
-
-
- Read from the start until position 02:30:
-
-
%02:30
-
-
-
--show_private_data, -private
-Show private data, that is data depending on the format of the
-particular shown element.
-This option is enabled by default, but you may need to disable it
-for specific uses, for example when creating XSD-compliant XML output.
-
-
--show_program_version
-Show information related to program version.
-
-Version information is printed within a section with name
-"PROGRAM_VERSION".
-
-
--show_library_versions
-Show information related to library versions.
-
-Version information for each library is printed within a section with
-name "LIBRARY_VERSION".
-
-
--show_versions
-Show information related to program and library versions. This is the
-equivalent of setting both -show_program_version and
--show_library_versions options.
-
-
--show_pixel_formats
-Show information about all pixel formats supported by FFmpeg.
-
-Pixel format information for each format is printed within a section
-with name "PIXEL_FORMAT".
-
-
--bitexact
-Force bitexact output, useful to produce output which is not dependent
-on the specific build.
-
-
--i input_file
-Read input_file .
-
-
-
-
-
-
4 Writers# TOC
-
-
A writer defines the output format adopted by ffprobe
, and will be
-used for printing all the parts of the output.
-
-
A writer may accept one or more arguments, which specify the options
-to adopt. The options are specified as a list of key =value
-pairs, separated by ":".
-
-
All writers support the following options:
-
-
-string_validation, sv
-Set string validation mode.
-
-The following values are accepted.
-
-‘fail ’
-The writer will fail immediately in case an invalid string (UTF-8)
-sequence or code point is found in the input. This is especially
-useful to validate input metadata.
-
-
-‘ignore ’
-Any validation error will be ignored. This will result in possibly
-broken output, especially with the json or xml writer.
-
-
-‘replace ’
-The writer will substitute invalid UTF-8 sequences or code points with
-the string specified with the string_validation_replacement .
-
-
-
-Default value is ‘replace ’.
-
-
-string_validation_replacement, svr
-Set replacement string to use in case string_validation is
-set to ‘replace ’.
-
-In case the option is not specified, the writer will assume the empty
-string, that is it will remove the invalid sequences from the input
-strings.
-
-
-
-
A description of the currently available writers follows.
-
-
-
4.1 default# TOC
-
Default format.
-
-
Print each section in the form:
-
-
[SECTION]
-key1=val1
-...
-keyN=valN
-[/SECTION]
-
-
-
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
-PROGRAM_STREAM section, and are prefixed by the string "TAG:".
-
-
A description of the accepted options follows.
-
-
-nokey, nk
-If set to 1 specify not to print the key of each field. Default value
-is 0.
-
-
-noprint_wrappers, nw
-If set to 1 specify not to print the section header and footer.
-Default value is 0.
-
-
-
-
-
4.2 compact, csv# TOC
-
Compact and CSV format.
-
-
The csv
writer is equivalent to compact
, but supports
-different defaults.
-
-
Each section is printed on a single line.
-If no option is specified, the output has the form:
-
-
section|key1=val1| ... |keyN=valN
-
-
-
Metadata tags are printed in the corresponding "format" or "stream"
-section. A metadata tag key, if printed, is prefixed by the string
-"tag:".
-
-
The description of the accepted options follows.
-
-
-item_sep, s
-Specify the character to use for separating fields in the output line.
-It must be a single printable character, it is "|" by default ("," for
-the csv
writer).
-
-
-nokey, nk
-If set to 1 specify not to print the key of each field. Its default
-value is 0 (1 for the csv
writer).
-
-
-escape, e
-Set the escape mode to use, default to "c" ("csv" for the csv
-writer).
-
-It can assume one of the following values:
-
-c
-Perform C-like escaping. Strings containing a newline (’\n’), carriage
-return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping
-character (’\’) or the item separator character SEP are escaped using C-like fashioned
-escaping, so that a newline is converted to the sequence "\n", a
-carriage return to "\r", ’\’ to "\\" and the separator SEP is
-converted to "\SEP ".
-
-
-csv
-Perform CSV-like escaping, as described in RFC4180. Strings
-containing a newline (’\n’), a carriage return (’\r’), a double quote
-(’"’), or SEP are enclosed in double-quotes.
-
-
-none
-Perform no escaping.
-
-
-
-
-print_section, p
-Print the section name at the beginning of each line if the value is 1,
-disable it with value set to 0. Default value is 1.
-
-
-
-
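-
For example, to print only the container duration as a bare CSV value, an
-illustrative command combining this writer with -show_entries (input.mp4 is a
-placeholder):
-
-
ffprobe -v error -of csv=print_section=0 -show_entries format=duration input.mp4
-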
-
-
4.3 flat# TOC
-
Flat format.
-
-
A free-form output where each line contains an explicit key=value, such as
-"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
-directly embedded in sh scripts as long as the separator character is an
-alphanumeric character or an underscore (see sep_char option).
-
-
The description of the accepted options follows.
-
-
-sep_char, s
-Separator character used to separate the chapter, the section name, IDs and
-potential tags in the printed field key.
-
-Default value is ’.’.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
4.4 ini# TOC
-
-
INI format output.
-
-
Print output in an INI based format.
-
-
The following conventions are adopted:
-
-
- all key and values are UTF-8
- ’.’ is the subgroup separator
- newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped
- ’\’ is the escape character
- ’#’ is the comment indicator
- ’=’ is the key/value separator
- ’:’ is not used but usually parsed as key/value separator
-
-
-
This writer accepts options as a list of key =value pairs,
-separated by ":".
-
-
The description of the accepted options follows.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
4.5 json# TOC
-
JSON based format.
-
-
Each section is printed using JSON notation.
-
-
The description of the accepted options follows.
-
-
-compact, c
-If set to 1 enable compact output, that is each section will be
-printed on a single line. Default value is 0.
-
-
-
-
For more information about JSON, see http://www.json.org/ .
-
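-
A minimal illustrative invocation of this writer with compact output enabled
-(input.mp4 is a placeholder):
-
-
ffprobe -of json=compact=1 -show_format input.mp4
-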
-
-
4.6 xml# TOC
-
-
XML based format.
-
-
The XML output is described in the XML schema description file
-ffprobe.xsd installed in the FFmpeg datadir.
-
-
An updated version of the schema can be retrieved at the url
-http://www.ffmpeg.org/schema/ffprobe.xsd , which redirects to the
-latest schema committed into the FFmpeg development source code tree.
-
-
Note that the output issued will be compliant to the
-ffprobe.xsd schema only when no special global output options
-(unit , prefix , byte_binary_prefix ,
-sexagesimal etc.) are specified.
-
-
The description of the accepted options follows.
-
-
-fully_qualified, q
-If set to 1 specify if the output should be fully qualified. Default
-value is 0.
-This is required for generating an XML file which can be validated
-through an XSD file.
-
-
-xsd_compliant, x
-If set to 1 perform more checks for ensuring that the output is XSD
-compliant. Default value is 0.
-This option automatically sets fully_qualified to 1.
-
-
-
-
For more information about the XML format, see
-http://www.w3.org/XML/ .
-
-
-
5 Timecode# TOC
-
-
ffprobe
supports Timecode extraction:
-
-
- MPEG1/2 timecode is extracted from the GOP, and is available in the video
-stream details (-show_streams , see timecode ).
-
- MOV timecode is extracted from tmcd track, so is available in the tmcd
-stream metadata (-show_streams , see TAG:timecode ).
-
- DV, GXF and AVI timecodes are available in format metadata
-(-show_format , see TAG:timecode ).
-
-
-
-
-
6 Syntax# TOC
-
-
This section documents the syntax and formats employed by the FFmpeg
-libraries and tools.
-
-
-
6.1 Quoting and escaping# TOC
-
-
FFmpeg adopts the following quoting and escaping mechanism, unless
-explicitly specified. The following rules are applied:
-
-
- '
and \
are special characters (respectively used for
-quoting and escaping). In addition to them, there might be other
-special characters depending on the specific syntax where the escaping
-and quoting are employed.
-
- A special character is escaped by prefixing it with a ’\’.
-
- All characters enclosed between ” are included literally in the
-parsed string. The quote character '
itself cannot be quoted,
-so you may need to close the quote and escape it.
-
- Leading and trailing whitespaces, unless escaped or quoted, are
-removed from the parsed string.
-
-
-
Note that you may need to add a second level of escaping when using
-the command line or a script, which depends on the syntax of the
-adopted shell language.
-
-
The function av_get_token
defined in
-libavutil/avstring.h can be used to parse a token quoted or
-escaped according to the rules defined above.
-
-
The tool tools/ffescape in the FFmpeg source tree can be used
-to automatically quote or escape a string in a script.
-
-
-
6.1.1 Examples# TOC
-
-
- Escape the string Crime d'Amour containing the ' special
-character:
-
-
Crime d\'Amour
-
-
- The string above contains a quote, so the ' needs to be escaped
-when quoting it:
-
-
'Crime d'\''Amour'
-
- Include leading or trailing whitespaces using quoting:
-
-
' this string starts and ends with whitespaces '
-
-
- Escaping and quoting can be mixed together:
-
-
' The string '\'string\'' is a string '
-
-
- To include a literal \
you can use either escaping or quoting:
-
-
'c:\foo' can be written as c:\\foo
-
-
-
-
-
6.2 Date# TOC
-
-
The accepted syntax is:
-
-
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
-now
-
-
-
If the value is "now" it takes the current time.
-
-
Time is local time unless Z is appended, in which case it is
-interpreted as UTC.
-If the year-month-day part is not specified it takes the current
-year-month-day.
-
-
-
6.3 Time duration# TOC
-
-
There are two accepted syntaxes for expressing time duration.
-
-
[-][HH:]MM:SS[.m...]
-
HH expresses the number of hours, MM the number of minutes
-for a maximum of 2 digits, and SS the number of seconds for a
-maximum of 2 digits. The m at the end expresses decimal value for
-SS .
-
-
or
-
[-]S+[.m...]
-
-
S expresses the number of seconds, with the optional decimal part
-m .
-
-
In both expressions, the optional ‘- ’ indicates negative duration.
-
-
-
6.3.1 Examples# TOC
-
-
The following examples are all valid time duration:
-
-
-‘55 ’
-55 seconds
-
-
-‘12:03:45 ’
-12 hours, 03 minutes and 45 seconds
-
-
-‘23.189 ’
-23.189 seconds
-
-
-
-
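As a hedged sketch, both dates and durations can be parsed programmatically
with av_parse_time from libavutil/parseutils.h; the duration argument
selects between the two syntaxes, and the sample strings below are
arbitrary.

/* Sketch: parse a date and a duration; results are in microseconds. */
#include <stdio.h>
#include <inttypes.h>
#include <libavutil/parseutils.h>

int main(void)
{
    int64_t when, dur;

    /* duration=0: parse an absolute date/time (local time, no trailing Z) */
    if (av_parse_time(&when, "2014-02-03 12:00:00", 0) < 0)
        return 1;
    /* duration=1: parse a time duration such as "12:03:45" or "23.189" */
    if (av_parse_time(&dur, "12:03:45", 1) < 0)
        return 1;

    printf("date:     %" PRId64 " us since the Epoch\n", when);
    printf("duration: %" PRId64 " us\n", dur);
    return 0;
}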
-
6.4 Video size# TOC
-
Specify the size of the sourced video. It may be a string of the form
-width x height , or the name of a size abbreviation; a parsing sketch
-follows the list of abbreviations below.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-720x480
-
-‘pal ’
-720x576
-
-‘qntsc ’
-352x240
-
-‘qpal ’
-352x288
-
-‘sntsc ’
-640x480
-
-‘spal ’
-768x576
-
-‘film ’
-352x240
-
-‘ntsc-film ’
-352x240
-
-‘sqcif ’
-128x96
-
-‘qcif ’
-176x144
-
-‘cif ’
-352x288
-
-‘4cif ’
-704x576
-
-‘16cif ’
-1408x1152
-
-‘qqvga ’
-160x120
-
-‘qvga ’
-320x240
-
-‘vga ’
-640x480
-
-‘svga ’
-800x600
-
-‘xga ’
-1024x768
-
-‘uxga ’
-1600x1200
-
-‘qxga ’
-2048x1536
-
-‘sxga ’
-1280x1024
-
-‘qsxga ’
-2560x2048
-
-‘hsxga ’
-5120x4096
-
-‘wvga ’
-852x480
-
-‘wxga ’
-1366x768
-
-‘wsxga ’
-1600x1024
-
-‘wuxga ’
-1920x1200
-
-‘woxga ’
-2560x1600
-
-‘wqsxga ’
-3200x2048
-
-‘wquxga ’
-3840x2400
-
-‘whsxga ’
-6400x4096
-
-‘whuxga ’
-7680x4800
-
-‘cga ’
-320x200
-
-‘ega ’
-640x350
-
-‘hd480 ’
-852x480
-
-‘hd720 ’
-1280x720
-
-‘hd1080 ’
-1920x1080
-
-‘2k ’
-2048x1080
-
-‘2kflat ’
-1998x1080
-
-‘2kscope ’
-2048x858
-
-‘4k ’
-4096x2160
-
-‘4kflat ’
-3996x2160
-
-‘4kscope ’
-4096x1716
-
-‘nhd ’
-640x360
-
-‘hqvga ’
-240x160
-
-‘wqvga ’
-400x240
-
-‘fwqvga ’
-432x240
-
-‘hvga ’
-480x320
-
-‘qhd ’
-960x540
-
-
-
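A minimal sketch of resolving a size string or abbreviation with
av_parse_video_size from libavutil/parseutils.h (the chosen strings are
arbitrary examples):

/* Sketch: resolve video size strings and abbreviations. */
#include <stdio.h>
#include <libavutil/parseutils.h>

int main(void)
{
    int w, h;

    if (av_parse_video_size(&w, &h, "hd720") < 0)
        return 1;
    printf("hd720   -> %dx%d\n", w, h);     /* 1280x720 */

    if (av_parse_video_size(&w, &h, "640x360") < 0)
        return 1;
    printf("640x360 -> %dx%d\n", w, h);
    return 0;
}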
-
-
6.5 Video rate# TOC
-
-
Specify the frame rate of a video, expressed as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a float
-number or a valid video frame rate abbreviation; a parsing sketch
-follows the list of abbreviations below.
-
-
The following abbreviations are recognized:
-
-‘ntsc ’
-30000/1001
-
-‘pal ’
-25/1
-
-‘qntsc ’
-30000/1001
-
-‘qpal ’
-25/1
-
-‘sntsc ’
-30000/1001
-
-‘spal ’
-25/1
-
-‘film ’
-24/1
-
-‘ntsc-film ’
-24000/1001
-
-
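A minimal sketch of resolving a frame rate string or abbreviation with
av_parse_video_rate from libavutil/parseutils.h (the chosen strings are
arbitrary examples):

/* Sketch: resolve frame rate strings and abbreviations. */
#include <stdio.h>
#include <libavutil/parseutils.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational r;

    if (av_parse_video_rate(&r, "ntsc-film") < 0)
        return 1;
    printf("ntsc-film -> %d/%d\n", r.num, r.den);   /* 24000/1001 */

    if (av_parse_video_rate(&r, "29.97") < 0)
        return 1;
    printf("29.97     -> %d/%d\n", r.num, r.den);
    return 0;
}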
-
-
-
6.6 Ratio# TOC
-
-
A ratio can be expressed as an expression, or in the form
-numerator :denominator .
-
-
Note that a ratio with infinite (1/0) or negative value is
-considered valid, so you should check on the returned value if you
-want to exclude those values.
-
-
The undefined value can be expressed using the "0:0" string.
-
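A minimal sketch of parsing a ratio with av_parse_ratio from
libavutil/parseutils.h; the maximum numerator/denominator of 255 passed
below is an arbitrary bound chosen for this example:

/* Sketch: parse ratio strings, including the "infinite" case. */
#include <stdio.h>
#include <libavutil/parseutils.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational q;

    if (av_parse_ratio(&q, "16:9", 255, 0, NULL) < 0)
        return 1;
    printf("16:9 -> %d/%d\n", q.num, q.den);

    /* Infinite (1/0) and negative ratios are considered valid, so check
     * the parsed value if such ratios must be rejected. */
    if (av_parse_ratio(&q, "1:0", 255, 0, NULL) >= 0 && q.den == 0)
        printf("1:0 parsed as an infinite ratio\n");
    return 0;
}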
-
-
6.7 Color# TOC
-
-
It can be the name of a color as defined below (case insensitive match) or a
-[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
-representing the alpha component.
-
-
The alpha component may be a string composed of "0x" followed by a
-hexadecimal number or a decimal number between 0.0 and 1.0, which
-represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
-transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
-component is not specified then ‘0xff ’ is assumed.
-
-
The string ‘random ’ will result in a random color.
-
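A minimal sketch of parsing color specifications with av_parse_color from
libavutil/parseutils.h; the sample specifications are arbitrary:

/* Sketch: parse color names, hex sequences and @alpha suffixes into RGBA. */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/parseutils.h>

int main(void)
{
    uint8_t rgba[4];
    const char *specs[] = { "AliceBlue", "0xFF000080", "red@0.5" };

    for (int i = 0; i < 3; i++) {
        if (av_parse_color(rgba, specs[i], -1, NULL) < 0)
            return 1;
        printf("%-10s -> R=%u G=%u B=%u A=%u\n",
               specs[i], rgba[0], rgba[1], rgba[2], rgba[3]);
    }
    return 0;
}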
-
The following names of colors are recognized:
-
-‘AliceBlue ’
-0xF0F8FF
-
-‘AntiqueWhite ’
-0xFAEBD7
-
-‘Aqua ’
-0x00FFFF
-
-‘Aquamarine ’
-0x7FFFD4
-
-‘Azure ’
-0xF0FFFF
-
-‘Beige ’
-0xF5F5DC
-
-‘Bisque ’
-0xFFE4C4
-
-‘Black ’
-0x000000
-
-‘BlanchedAlmond ’
-0xFFEBCD
-
-‘Blue ’
-0x0000FF
-
-‘BlueViolet ’
-0x8A2BE2
-
-‘Brown ’
-0xA52A2A
-
-‘BurlyWood ’
-0xDEB887
-
-‘CadetBlue ’
-0x5F9EA0
-
-‘Chartreuse ’
-0x7FFF00
-
-‘Chocolate ’
-0xD2691E
-
-‘Coral ’
-0xFF7F50
-
-‘CornflowerBlue ’
-0x6495ED
-
-‘Cornsilk ’
-0xFFF8DC
-
-‘Crimson ’
-0xDC143C
-
-‘Cyan ’
-0x00FFFF
-
-‘DarkBlue ’
-0x00008B
-
-‘DarkCyan ’
-0x008B8B
-
-‘DarkGoldenRod ’
-0xB8860B
-
-‘DarkGray ’
-0xA9A9A9
-
-‘DarkGreen ’
-0x006400
-
-‘DarkKhaki ’
-0xBDB76B
-
-‘DarkMagenta ’
-0x8B008B
-
-‘DarkOliveGreen ’
-0x556B2F
-
-‘Darkorange ’
-0xFF8C00
-
-‘DarkOrchid ’
-0x9932CC
-
-‘DarkRed ’
-0x8B0000
-
-‘DarkSalmon ’
-0xE9967A
-
-‘DarkSeaGreen ’
-0x8FBC8F
-
-‘DarkSlateBlue ’
-0x483D8B
-
-‘DarkSlateGray ’
-0x2F4F4F
-
-‘DarkTurquoise ’
-0x00CED1
-
-‘DarkViolet ’
-0x9400D3
-
-‘DeepPink ’
-0xFF1493
-
-‘DeepSkyBlue ’
-0x00BFFF
-
-‘DimGray ’
-0x696969
-
-‘DodgerBlue ’
-0x1E90FF
-
-‘FireBrick ’
-0xB22222
-
-‘FloralWhite ’
-0xFFFAF0
-
-‘ForestGreen ’
-0x228B22
-
-‘Fuchsia ’
-0xFF00FF
-
-‘Gainsboro ’
-0xDCDCDC
-
-‘GhostWhite ’
-0xF8F8FF
-
-‘Gold ’
-0xFFD700
-
-‘GoldenRod ’
-0xDAA520
-
-‘Gray ’
-0x808080
-
-‘Green ’
-0x008000
-
-‘GreenYellow ’
-0xADFF2F
-
-‘HoneyDew ’
-0xF0FFF0
-
-‘HotPink ’
-0xFF69B4
-
-‘IndianRed ’
-0xCD5C5C
-
-‘Indigo ’
-0x4B0082
-
-‘Ivory ’
-0xFFFFF0
-
-‘Khaki ’
-0xF0E68C
-
-‘Lavender ’
-0xE6E6FA
-
-‘LavenderBlush ’
-0xFFF0F5
-
-‘LawnGreen ’
-0x7CFC00
-
-‘LemonChiffon ’
-0xFFFACD
-
-‘LightBlue ’
-0xADD8E6
-
-‘LightCoral ’
-0xF08080
-
-‘LightCyan ’
-0xE0FFFF
-
-‘LightGoldenRodYellow ’
-0xFAFAD2
-
-‘LightGreen ’
-0x90EE90
-
-‘LightGrey ’
-0xD3D3D3
-
-‘LightPink ’
-0xFFB6C1
-
-‘LightSalmon ’
-0xFFA07A
-
-‘LightSeaGreen ’
-0x20B2AA
-
-‘LightSkyBlue ’
-0x87CEFA
-
-‘LightSlateGray ’
-0x778899
-
-‘LightSteelBlue ’
-0xB0C4DE
-
-‘LightYellow ’
-0xFFFFE0
-
-‘Lime ’
-0x00FF00
-
-‘LimeGreen ’
-0x32CD32
-
-‘Linen ’
-0xFAF0E6
-
-‘Magenta ’
-0xFF00FF
-
-‘Maroon ’
-0x800000
-
-‘MediumAquaMarine ’
-0x66CDAA
-
-‘MediumBlue ’
-0x0000CD
-
-‘MediumOrchid ’
-0xBA55D3
-
-‘MediumPurple ’
-0x9370D8
-
-‘MediumSeaGreen ’
-0x3CB371
-
-‘MediumSlateBlue ’
-0x7B68EE
-
-‘MediumSpringGreen ’
-0x00FA9A
-
-‘MediumTurquoise ’
-0x48D1CC
-
-‘MediumVioletRed ’
-0xC71585
-
-‘MidnightBlue ’
-0x191970
-
-‘MintCream ’
-0xF5FFFA
-
-‘MistyRose ’
-0xFFE4E1
-
-‘Moccasin ’
-0xFFE4B5
-
-‘NavajoWhite ’
-0xFFDEAD
-
-‘Navy ’
-0x000080
-
-‘OldLace ’
-0xFDF5E6
-
-‘Olive ’
-0x808000
-
-‘OliveDrab ’
-0x6B8E23
-
-‘Orange ’
-0xFFA500
-
-‘OrangeRed ’
-0xFF4500
-
-‘Orchid ’
-0xDA70D6
-
-‘PaleGoldenRod ’
-0xEEE8AA
-
-‘PaleGreen ’
-0x98FB98
-
-‘PaleTurquoise ’
-0xAFEEEE
-
-‘PaleVioletRed ’
-0xD87093
-
-‘PapayaWhip ’
-0xFFEFD5
-
-‘PeachPuff ’
-0xFFDAB9
-
-‘Peru ’
-0xCD853F
-
-‘Pink ’
-0xFFC0CB
-
-‘Plum ’
-0xDDA0DD
-
-‘PowderBlue ’
-0xB0E0E6
-
-‘Purple ’
-0x800080
-
-‘Red ’
-0xFF0000
-
-‘RosyBrown ’
-0xBC8F8F
-
-‘RoyalBlue ’
-0x4169E1
-
-‘SaddleBrown ’
-0x8B4513
-
-‘Salmon ’
-0xFA8072
-
-‘SandyBrown ’
-0xF4A460
-
-‘SeaGreen ’
-0x2E8B57
-
-‘SeaShell ’
-0xFFF5EE
-
-‘Sienna ’
-0xA0522D
-
-‘Silver ’
-0xC0C0C0
-
-‘SkyBlue ’
-0x87CEEB
-
-‘SlateBlue ’
-0x6A5ACD
-
-‘SlateGray ’
-0x708090
-
-‘Snow ’
-0xFFFAFA
-
-‘SpringGreen ’
-0x00FF7F
-
-‘SteelBlue ’
-0x4682B4
-
-‘Tan ’
-0xD2B48C
-
-‘Teal ’
-0x008080
-
-‘Thistle ’
-0xD8BFD8
-
-‘Tomato ’
-0xFF6347
-
-‘Turquoise ’
-0x40E0D0
-
-‘Violet ’
-0xEE82EE
-
-‘Wheat ’
-0xF5DEB3
-
-‘White ’
-0xFFFFFF
-
-‘WhiteSmoke ’
-0xF5F5F5
-
-‘Yellow ’
-0xFFFF00
-
-‘YellowGreen ’
-0x9ACD32
-
-
-
-
-
6.8 Channel Layout# TOC
-
-
A channel layout specifies the spatial disposition of the channels in
-a multi-channel audio stream. To specify a channel layout, FFmpeg
-makes use of a special syntax.
-
-
Individual channels are identified by an id, as given by the table
-below:
-
-‘FL ’
-front left
-
-‘FR ’
-front right
-
-‘FC ’
-front center
-
-‘LFE ’
-low frequency
-
-‘BL ’
-back left
-
-‘BR ’
-back right
-
-‘FLC ’
-front left-of-center
-
-‘FRC ’
-front right-of-center
-
-‘BC ’
-back center
-
-‘SL ’
-side left
-
-‘SR ’
-side right
-
-‘TC ’
-top center
-
-‘TFL ’
-top front left
-
-‘TFC ’
-top front center
-
-‘TFR ’
-top front right
-
-‘TBL ’
-top back left
-
-‘TBC ’
-top back center
-
-‘TBR ’
-top back right
-
-‘DL ’
-downmix left
-
-‘DR ’
-downmix right
-
-‘WL ’
-wide left
-
-‘WR ’
-wide right
-
-‘SDL ’
-surround direct left
-
-‘SDR ’
-surround direct right
-
-‘LFE2 ’
-low frequency 2
-
-
-
-
Standard channel layout compositions can be specified by using the
-following identifiers:
-
-‘mono ’
-FC
-
-‘stereo ’
-FL+FR
-
-‘2.1 ’
-FL+FR+LFE
-
-‘3.0 ’
-FL+FR+FC
-
-‘3.0(back) ’
-FL+FR+BC
-
-‘4.0 ’
-FL+FR+FC+BC
-
-‘quad ’
-FL+FR+BL+BR
-
-‘quad(side) ’
-FL+FR+SL+SR
-
-‘3.1 ’
-FL+FR+FC+LFE
-
-‘5.0 ’
-FL+FR+FC+BL+BR
-
-‘5.0(side) ’
-FL+FR+FC+SL+SR
-
-‘4.1 ’
-FL+FR+FC+LFE+BC
-
-‘5.1 ’
-FL+FR+FC+LFE+BL+BR
-
-‘5.1(side) ’
-FL+FR+FC+LFE+SL+SR
-
-‘6.0 ’
-FL+FR+FC+BC+SL+SR
-
-‘6.0(front) ’
-FL+FR+FLC+FRC+SL+SR
-
-‘hexagonal ’
-FL+FR+FC+BL+BR+BC
-
-‘6.1 ’
-FL+FR+FC+LFE+BC+SL+SR
-
-‘6.1(back) ’
-FL+FR+FC+LFE+BL+BR+BC
-
-‘6.1(front) ’
-FL+FR+LFE+FLC+FRC+SL+SR
-
-‘7.0 ’
-FL+FR+FC+BL+BR+SL+SR
-
-‘7.0(front) ’
-FL+FR+FC+FLC+FRC+SL+SR
-
-‘7.1 ’
-FL+FR+FC+LFE+BL+BR+SL+SR
-
-‘7.1(wide) ’
-FL+FR+FC+LFE+BL+BR+FLC+FRC
-
-‘7.1(wide-side) ’
-FL+FR+FC+LFE+FLC+FRC+SL+SR
-
-‘octagonal ’
-FL+FR+FC+BL+BR+BC+SL+SR
-
-‘downmix ’
-DL+DR
-
-
-
-
A custom channel layout can be specified as a sequence of terms, separated by
-’+’ or ’|’. Each term can be:
-
- the name of a standard channel layout (e.g. ‘mono ’,
-‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
-
- the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
-
- a number of channels, in decimal, optionally followed by ’c’, yielding
-the default channel layout for that number of channels (see the
-function av_get_default_channel_layout
)
-
- a channel layout mask, in hexadecimal starting with "0x" (see the
-AV_CH_*
macros in libavutil/channel_layout.h ).
-
-
-
Starting from libavutil version 53 the trailing character "c" to
-specify a number of channels will be required, while a channel layout
-mask could also be specified as a decimal number (if and only if not
-followed by "c").
-
-
See also the function av_get_channel_layout
defined in
-libavutil/channel_layout.h .
-
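A minimal sketch of resolving channel layout specifications with
av_get_channel_layout and related helpers from libavutil/channel_layout.h;
the sample specifications are arbitrary:

/* Sketch: resolve channel layout strings and describe them back. */
#include <stdio.h>
#include <inttypes.h>
#include <libavutil/channel_layout.h>

int main(void)
{
    const char *specs[] = { "5.1", "FL+FR+LFE", "6c", "0x3f" };

    for (int i = 0; i < 4; i++) {
        uint64_t layout = av_get_channel_layout(specs[i]);
        char buf[128];
        av_get_channel_layout_string(buf, sizeof(buf),
                                     av_get_channel_layout_nb_channels(layout),
                                     layout);
        printf("%-9s -> 0x%-4" PRIx64 " (%s)\n", specs[i], layout, buf);
    }
    return 0;
}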
-
-
7 Expression Evaluation# TOC
-
-
When evaluating an arithmetic expression, FFmpeg uses an internal
-formula evaluator, implemented through the libavutil/eval.h
-interface.
-
-
An expression may contain unary, binary operators, constants, and
-functions.
-
-
Two expressions expr1 and expr2 can be combined to form
-another expression "expr1 ;expr2 ".
-expr1 and expr2 are evaluated in turn, and the new
-expression evaluates to the value of expr2 .
-
-
The following binary operators are available: +
, -
,
-*
, /
, ^
.
-
-
The following unary operators are available: +
, -
.
-
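A minimal sketch of evaluating an expression through the
libavutil/eval.h interface; the constant name "w" and its value are
arbitrary choices for this example:

/* Sketch: evaluate an expression with one user-supplied constant. */
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    static const char * const names[]  = { "w", NULL };
    static const double       values[] = { 1280.0 };
    double result;

    if (av_expr_parse_and_eval(&result, "if(gt(w,1000), w/2, w)",
                               names, values,
                               NULL, NULL, NULL, NULL,  /* no custom functions */
                               NULL, 0, NULL) < 0)
        return 1;
    printf("result = %f\n", result);   /* 640.000000 */
    return 0;
}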
-
The following functions are available:
-
-abs(x)
-Compute absolute value of x .
-
-
-acos(x)
-Compute arccosine of x .
-
-
-asin(x)
-Compute arcsine of x .
-
-
-atan(x)
-Compute arctangent of x .
-
-
-between(x, min, max)
-Return 1 if x is greater than or equal to min and less than or
-equal to max , 0 otherwise.
-
-
-bitand(x, y)
-bitor(x, y)
-Compute bitwise and/or operation on x and y .
-
-The results of the evaluation of x and y are converted to
-integers before executing the bitwise operation.
-
-Note that both the conversion to integer and the conversion back to
-floating point can lose precision. Beware of unexpected results for
-large numbers (usually 2^53 and larger).
-
-
-ceil(expr)
-Round the value of expression expr upwards to the nearest
-integer. For example, "ceil(1.5)" is "2.0".
-
-
-clip(x, min, max)
-Return the value of x clipped between min and max .
-
-
-cos(x)
-Compute cosine of x .
-
-
-cosh(x)
-Compute hyperbolic cosine of x .
-
-
-eq(x, y)
-Return 1 if x and y are equivalent, 0 otherwise.
-
-
-exp(x)
-Compute exponential of x (with base e
, the Euler’s number).
-
-
-floor(expr)
-Round the value of expression expr downwards to the nearest
-integer. For example, "floor(-1.5)" is "-2.0".
-
-
-gauss(x)
-Compute Gauss function of x , corresponding to
-exp(-x*x/2) / sqrt(2*PI)
.
-
-
-gcd(x, y)
-Return the greatest common divisor of x and y . If both x and
-y are 0 or either or both are less than zero then behavior is undefined.
-
-
-gt(x, y)
-Return 1 if x is greater than y , 0 otherwise.
-
-
-gte(x, y)
-Return 1 if x is greater than or equal to y , 0 otherwise.
-
-
-hypot(x, y)
-This function is similar to the C function with the same name; it returns
-"sqrt(x *x + y *y )", the length of the hypotenuse of a
-right triangle with sides of length x and y , or the distance of the
-point (x , y ) from the origin.
-
-
-if(x, y)
-Evaluate x , and if the result is non-zero return the result of
-the evaluation of y , return 0 otherwise.
-
-
-if(x, y, z)
-Evaluate x , and if the result is non-zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-ifnot(x, y)
-Evaluate x , and if the result is zero return the result of the
-evaluation of y , return 0 otherwise.
-
-
-ifnot(x, y, z)
-Evaluate x , and if the result is zero return the evaluation
-result of y , otherwise the evaluation result of z .
-
-
-isinf(x)
-Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
-
-
-isnan(x)
-Return 1.0 if x is NAN, 0.0 otherwise.
-
-
-ld(var)
-Load the value of the internal variable with number
-var , which was previously stored with st(var , expr ).
-The function returns the loaded value.
-
-
-log(x)
-Compute natural logarithm of x .
-
-
-lt(x, y)
-Return 1 if x is less than y , 0 otherwise.
-
-
-lte(x, y)
-Return 1 if x is less than or equal to y , 0 otherwise.
-
-
-max(x, y)
-Return the maximum between x and y .
-
-
-min(x, y)
-Return the minimum between x and y .
-
-
-mod(x, y)
-Compute the remainder of division of x by y .
-
-
-not(expr)
-Return 1.0 if expr is zero, 0.0 otherwise.
-
-
-pow(x, y)
-Compute the power of x elevated to y ; it is equivalent to
-"(x )^(y )".
-
-
-print(t)
-print(t, l)
-Print the value of expression t with loglevel l . If
-l is not specified then a default log level is used.
-Returns the value of the expression printed.
-
-Prints t with loglevel l
-
-
-random(x)
-Return a pseudo random value between 0.0 and 1.0. x is the index of the
-internal variable which will be used to save the seed/state.
-
-
-root(expr, max)
-Find an input value for which the function represented by expr
-with argument ld(0) is 0 in the interval 0..max .
-
-The expression in expr must denote a continuous function or the
-result is undefined.
-
-ld(0) is used to represent the function input value, which means
-that the given expression will be evaluated multiple times with
-various input values that the expression can access through
-ld(0)
. When the expression evaluates to 0 then the
-corresponding input value will be returned.
-
-
-sin(x)
-Compute sine of x .
-
-
-sinh(x)
-Compute hyperbolic sine of x .
-
-
-sqrt(expr)
-Compute the square root of expr . This is equivalent to
-"(expr )^.5".
-
-
-squish(x)
-Compute expression 1/(1 + exp(4*x))
.
-
-
-st(var, expr)
-Store the value of the expression expr in an internal
-variable. var specifies the number of the variable in which to
-store the value, and it is a value ranging from 0 to 9. The function
-returns the value stored in the internal variable.
-Note: variables are currently not shared between expressions.
-
-
-tan(x)
-Compute tangent of x .
-
-
-tanh(x)
-Compute hyperbolic tangent of x .
-
-
-taylor(expr, x)
-taylor(expr, x, id)
-Evaluate a Taylor series at x , given an expression representing
-the ld(id)
-th derivative of a function at 0.
-
-When the series does not converge the result is undefined.
-
-ld(id) is used to represent the derivative order in expr ,
-which means that the given expression will be evaluated multiple times
-with various input values that the expression can access through
-ld(id)
. If id is not specified then 0 is assumed.
-
-Note, when you have the derivatives at y instead of 0,
-taylor(expr, x-y)
can be used.
-
-
-time(0)
-Return the current (wallclock) time in seconds.
-
-
-trunc(expr)
-Round the value of expression expr towards zero to the nearest
-integer. For example, "trunc(-1.5)" is "-1.0".
-
-
-while(cond, expr)
-Evaluate expression expr while the expression cond is
-non-zero, and returns the value of the last expr evaluation, or
-NAN if cond was always false.
-
-
-
-
The following constants are available:
-
-PI
-area of the unit disc, approximately 3.14
-
-E
-exp(1) (Euler’s number), approximately 2.718
-
-PHI
-golden ratio (1+sqrt(5))/2, approximately 1.618
-
-
-
-
Assuming that an expression is considered "true" if it has a non-zero
-value, note that:
-
-
*
works like AND
-
-
+
works like OR
-
-
-For example the construct:
-
if(A,B) + not(A)*C
-
is equivalent to:
-
if(A,B,C)
-
-
In your C code, you can extend the list of unary and binary functions,
-and define recognized constants, so that they are available for your
-expressions.
-
-
The evaluator also recognizes the International System unit prefixes.
-If ’i’ is appended after the prefix, binary prefixes are used, which
-are based on powers of 1024 instead of powers of 1000.
-The ’B’ postfix multiplies the value by 8, and can be appended after a
-unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
-’G’ and ’B’ as number postfix.
-
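As a hedged example of these prefixes, the same libavutil/eval.h
interface evaluates them directly; the strings below are arbitrary:

/* Sketch: SI and binary prefixes, and the 'B' postfix, in expressions. */
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    static const char * const no_names[] = { NULL };
    const char *exprs[] = { "1.5M", "2Ki", "2KiB" };

    for (int i = 0; i < 3; i++) {
        double v;
        if (av_expr_parse_and_eval(&v, exprs[i], no_names, NULL,
                                   NULL, NULL, NULL, NULL,
                                   NULL, 0, NULL) < 0)
            return 1;
        /* 1.5M = 1500000, 2Ki = 2048, 2KiB = 2048*8 = 16384 */
        printf("%-4s = %f\n", exprs[i], v);
    }
    return 0;
}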
-
The list of available International System prefixes follows, with
-indication of the corresponding powers of 10 and of 2.
-
-y
-10^-24 / 2^-80
-
-z
-10^-21 / 2^-70
-
-a
-10^-18 / 2^-60
-
-f
-10^-15 / 2^-50
-
-p
-10^-12 / 2^-40
-
-n
-10^-9 / 2^-30
-
-u
-10^-6 / 2^-20
-
-m
-10^-3 / 2^-10
-
-c
-10^-2
-
-d
-10^-1
-
-h
-10^2
-
-k
-10^3 / 2^10
-
-K
-10^3 / 2^10
-
-M
-10^6 / 2^20
-
-G
-10^9 / 2^30
-
-T
-10^12 / 2^40
-
-P
-10^15 / 2^50
-
-E
-10^18 / 2^60
-
-Z
-10^21 / 2^70
-
-Y
-10^24 / 2^80
-
-
-
-
-
-
8 OpenCL Options# TOC
-
-
When FFmpeg is configured with --enable-opencl
, it is possible
-to set the options for the global OpenCL context.
-
-
The list of supported options follows:
-
-
-build_options
-Set build options used to compile the registered kernels.
-
-See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
-
-
-platform_idx
-Select the index of the platform to run OpenCL code.
-
-The specified index must be one of the indexes in the device list
-which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-device_idx
-Select the index of the device used to run OpenCL code.
-
-The specified index must be one of the indexes in the device list which
-can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
-
-
-
-
-
-
9 Codec Options# TOC
-
-
libavcodec provides some generic global options, which can be set on
-all the encoders and decoders. In addition each codec may support
-so-called private options, which are specific for a given codec.
-
-
Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
-of the meaning of the specified options. Also some options are
-meant only for decoding or encoding.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVCodecContext
options or using the libavutil/opt.h API
-for programmatic use.
-
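A minimal sketch of setting some of these generic options
programmatically through the libavutil/opt.h API; the choice of the
MPEG-4 encoder and the option values are arbitrary, and error checking
is abbreviated:

/* Sketch: set generic codec options on an AVCodecContext with av_opt_set(). */
#include <stdio.h>
#include <inttypes.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

int main(void)
{
    avcodec_register_all();

    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
    if (!codec)
        return 1;
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return 1;

    /* Same effect as "-b 2M -g 25 -flags +cgop" on the command line. */
    av_opt_set(ctx, "b",     "2M",    0);
    av_opt_set(ctx, "g",     "25",    0);
    av_opt_set(ctx, "flags", "+cgop", 0);

    printf("bitrate=%" PRId64 " gop=%d\n", (int64_t)ctx->bit_rate, ctx->gop_size);
    avcodec_free_context(&ctx);
    return 0;
}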
-
The list of supported options follows:
-
-
-b integer (encoding,audio,video )
-Set bitrate in bits/s. Default value is 200K.
-
-
-ab integer (encoding,audio )
-Set audio bitrate (in bits/s). Default value is 128K.
-
-
-bt integer (encoding,video )
-Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
-tolerance specifies how far ratecontrol is willing to deviate from the
-target average bitrate value. This is not related to min/max
-bitrate. Lowering tolerance too much has an adverse effect on quality.
-
-
-flags flags (decoding/encoding,audio,video,subtitles )
-Set generic flags.
-
-Possible values:
-
-‘mv4 ’
-Use four motion vectors per macroblock (MPEG-4).
-
-‘qpel ’
-Use 1/4 pel motion compensation.
-
-‘loop ’
-Use loop filter.
-
-‘qscale ’
-Use fixed qscale.
-
-‘gmc ’
-Use gmc.
-
-‘mv0 ’
-Always try a mb with mv=<0,0>.
-
-‘input_preserved ’
-‘pass1 ’
-Use internal 2pass ratecontrol in first pass mode.
-
-‘pass2 ’
-Use internal 2pass ratecontrol in second pass mode.
-
-‘gray ’
-Only decode/encode grayscale.
-
-‘emu_edge ’
-Do not draw edges.
-
-‘psnr ’
-Set error[?] variables during encoding.
-
-‘truncated ’
-‘naq ’
-Normalize adaptive quantization.
-
-‘ildct ’
-Use interlaced DCT.
-
-‘low_delay ’
-Force low delay.
-
-‘global_header ’
-Place global headers in extradata instead of every keyframe.
-
-‘bitexact ’
-Only write platform-, build- and time-independent data (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-‘aic ’
-Apply H263 advanced intra coding / mpeg4 ac prediction.
-
-‘cbp ’
-Deprecated, use mpegvideo private options instead.
-
-‘qprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘ilme ’
-Apply interlaced motion estimation.
-
-‘cgop ’
-Use closed gop.
-
-
-
-
-me_method integer (encoding,video )
-Set motion estimation method.
-
-Possible values:
-
-‘zero ’
-zero motion estimation (fastest)
-
-‘full ’
-full motion estimation (slowest)
-
-‘epzs ’
-EPZS motion estimation (default)
-
-‘esa ’
-esa motion estimation (alias for full)
-
-‘tesa ’
-tesa motion estimation
-
-‘dia ’
-dia motion estimation (alias for epzs)
-
-‘log ’
-log motion estimation
-
-‘phods ’
-phods motion estimation
-
-‘x1 ’
-X1 motion estimation
-
-‘hex ’
-hex motion estimation
-
-‘umh ’
-umh motion estimation
-
-‘iter ’
-iter motion estimation
-
-
-
-
-extradata_size integer
-Set extradata size.
-
-
-time_base rational number
-Set codec time base.
-
-It is the fundamental unit of time (in seconds) in terms of which
-frame timestamps are represented. For fixed-fps content, timebase
-should be 1 / frame_rate
and timestamp increments should be
-identically 1.
-
-
-g integer (encoding,video )
-Set the group of picture size. Default value is 12.
-
-
-ar integer (decoding/encoding,audio )
-Set audio sampling rate (in Hz).
-
-
-ac integer (decoding/encoding,audio )
-Set number of audio channels.
-
-
-cutoff integer (encoding,audio )
-Set cutoff bandwidth.
-
-
-frame_size integer (encoding,audio )
-Set audio frame size.
-
-Each submitted frame except the last must contain exactly frame_size
-samples per channel. May be 0 when the codec has
-CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
-restricted. It is set by some decoders to indicate constant frame
-size.
-
-
-frame_number integer
-Set the frame number.
-
-
-delay integer
-qcomp float (encoding,video )
-Set video quantizer scale compression (VBR). It is used as a constant
-in the ratecontrol equation. Recommended range for default rc_eq:
-0.0-1.0.
-
-
-qblur float (encoding,video )
-Set video quantizer scale blur (VBR).
-
-
-qmin integer (encoding,video )
-Set min video quantizer scale (VBR). Must be between -1 and
-69; default value is 2.
-
-
-qmax integer (encoding,video )
-Set max video quantizer scale (VBR). Must be between -1 and
-1024; default value is 31.
-
-
-qdiff integer (encoding,video )
-Set max difference between the quantizer scale (VBR).
-
-
-bf integer (encoding,video )
-Set max number of B frames between non-B-frames.
-
-Must be an integer between -1 and 16. 0 means that B-frames are
-disabled. If a value of -1 is used, it will choose an automatic value
-depending on the encoder.
-
-Default value is 0.
-
-
-b_qfactor float (encoding,video )
-Set qp factor between P and B frames.
-
-
-rc_strategy integer (encoding,video )
-Set ratecontrol method.
-
-
-b_strategy integer (encoding,video )
-Set strategy to choose between I/P/B-frames.
-
-
-ps integer (encoding,video )
-Set RTP payload size in bytes.
-
-
-mv_bits integer
-header_bits integer
-i_tex_bits integer
-p_tex_bits integer
-i_count integer
-p_count integer
-skip_count integer
-misc_bits integer
-frame_bits integer
-codec_tag integer
-bug flags (decoding,video )
-Work around encoder bugs that are not automatically detected.
-
-Possible values:
-
-‘autodetect ’
-‘old_msmpeg4 ’
-some old lavc generated msmpeg4v3 files (no autodetection)
-
-‘xvid_ilace ’
-Xvid interlacing bug (autodetected if fourcc==XVIX)
-
-‘ump4 ’
-(autodetected if fourcc==UMP4)
-
-‘no_padding ’
-padding bug (autodetected)
-
-‘amv ’
-‘ac_vlc ’
-illegal vlc bug (autodetected per fourcc)
-
-‘qpel_chroma ’
-‘std_qpel ’
-old standard qpel (autodetected per fourcc/version)
-
-‘qpel_chroma2 ’
-‘direct_blocksize ’
-direct-qpel-blocksize bug (autodetected per fourcc/version)
-
-‘edge ’
-edge padding bug (autodetected per fourcc/version)
-
-‘hpel_chroma ’
-‘dc_clip ’
-‘ms ’
-Workaround various bugs in microsoft broken decoders.
-
-‘trunc ’
-truncated frames
-
-
-
-
-lelim integer (encoding,video )
-Set single coefficient elimination threshold for luminance (negative
-values also consider DC coefficient).
-
-
-celim integer (encoding,video )
-Set single coefficient elimination threshold for chrominance (negative
-values also consider dc coefficient)
-
-
-strict integer (decoding/encoding,audio,video )
-Specify how strictly to follow the standards.
-
-Possible values:
-
-‘very ’
-strictly conform to an older, stricter version of the spec or reference software
-
-‘strict ’
-strictly conform to all the things in the spec no matter what the consequences
-
-‘normal ’
-‘unofficial ’
-allow unofficial extensions
-
-‘experimental ’
-allow non standardized experimental things, experimental
-(unfinished/work in progress/not well tested) decoders and encoders.
-Note: experimental decoders can pose a security risk, do not use this for
-decoding untrusted input.
-
-
-
-
-b_qoffset float (encoding,video )
-Set QP offset between P and B frames.
-
-
-err_detect flags (decoding,audio,video )
-Set error detection flags.
-
-Possible values:
-
-‘crccheck ’
-verify embedded CRCs
-
-‘bitstream ’
-detect bitstream specification deviations
-
-‘buffer ’
-detect improper bitstream length
-
-‘explode ’
-abort decoding on minor error detection
-
-‘ignore_err ’
-ignore decoding errors, and continue decoding.
-This is useful if you want to analyze the content of a video and thus want
-everything to be decoded no matter what. This option will not result in a video
-that is pleasing to watch in case of errors.
-
-‘careful ’
-consider things that violate the spec and have not been seen in the wild as errors
-
-‘compliant ’
-consider all spec non compliancies as errors
-
-‘aggressive ’
-consider things that a sane encoder should not do as an error
-
-
-
-
-has_b_frames integer
-block_align integer
-mpeg_quant integer (encoding,video )
-Use MPEG quantizers instead of H.263.
-
-
-qsquish float (encoding,video )
-How to keep quantizer between qmin and qmax (0 = clip, 1 = use
-differentiable function).
-
-
-rc_qmod_amp float (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_qmod_freq integer (encoding,video )
-Set experimental quantizer modulation.
-
-
-rc_override_count integer
-rc_eq string (encoding,video )
-Set rate control equation. When computing the expression, besides the
-standard functions defined in the section ’Expression Evaluation’, the
-following functions are available: bits2qp(bits), qp2bits(qp). Also
-the following constants are available: iTex pTex tex mv fCode iCount
-mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
-avgTex.
-
-
-maxrate integer (encoding,audio,video )
-Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
-
-
-minrate integer (encoding,audio,video )
-Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
-encode. It is of little use elsewise.
-
-
-bufsize integer (encoding,audio,video )
-Set ratecontrol buffer size (in bits).
-
-
-rc_buf_aggressivity float (encoding,video )
-Currently useless.
-
-
-i_qfactor float (encoding,video )
-Set QP factor between P and I frames.
-
-
-i_qoffset float (encoding,video )
-Set QP offset between P and I frames.
-
-
-rc_init_cplx float (encoding,video )
-Set initial complexity for 1-pass encoding.
-
-
-dct integer (encoding,video )
-Set DCT algorithm.
-
-Possible values:
-
-‘auto ’
-autoselect a good one (default)
-
-‘fastint ’
-fast integer
-
-‘int ’
-accurate integer
-
-‘mmx ’
-‘altivec ’
-‘faan ’
-floating point AAN DCT
-
-
-
-
-lumi_mask float (encoding,video )
-Compress bright areas stronger than medium ones.
-
-
-tcplx_mask float (encoding,video )
-Set temporal complexity masking.
-
-
-scplx_mask float (encoding,video )
-Set spatial complexity masking.
-
-
-p_mask float (encoding,video )
-Set inter masking.
-
-
-dark_mask float (encoding,video )
-Compress dark areas stronger than medium ones.
-
-
-idct integer (decoding/encoding,video )
-Select IDCT implementation.
-
-Possible values:
-
-‘auto ’
-‘int ’
-‘simple ’
-‘simplemmx ’
-‘simpleauto ’
-Automatically pick an IDCT compatible with the simple one
-
-
-‘arm ’
-‘altivec ’
-‘sh4 ’
-‘simplearm ’
-‘simplearmv5te ’
-‘simplearmv6 ’
-‘simpleneon ’
-‘simplealpha ’
-‘ipp ’
-‘xvidmmx ’
-‘faani ’
-floating point AAN IDCT
-
-
-
-
-slice_count integer
-ec flags (decoding,video )
-Set error concealment strategy.
-
-Possible values:
-
-‘guess_mvs ’
-iterative motion vector (MV) search (slow)
-
-‘deblock ’
-use strong deblock filter for damaged MBs
-
-‘favor_inter ’
-favor predicting from the previous frame instead of the current
-
-
-
-
-bits_per_coded_sample integer
-pred integer (encoding,video )
-Set prediction method.
-
-Possible values:
-
-‘left ’
-‘plane ’
-‘median ’
-
-
-
-aspect rational number (encoding,video )
-Set sample aspect ratio.
-
-
-debug flags (decoding/encoding,audio,video,subtitles )
-Print specific debug info.
-
-Possible values:
-
-‘pict ’
-picture info
-
-‘rc ’
-rate control
-
-‘bitstream ’
-‘mb_type ’
-macroblock (MB) type
-
-‘qp ’
-per-block quantization parameter (QP)
-
-‘mv ’
-motion vector
-
-‘dct_coeff ’
-‘skip ’
-‘startcode ’
-‘pts ’
-‘er ’
-error recognition
-
-‘mmco ’
-memory management control operations (H.264)
-
-‘bugs ’
-‘vis_qp ’
-visualize quantization parameter (QP), lower QP are tinted greener
-
-‘vis_mb_type ’
-visualize block types
-
-‘buffers ’
-picture buffer allocations
-
-‘thread_ops ’
-threading operations
-
-‘nomc ’
-skip motion compensation
-
-
-
-
-vismv integer (decoding,video )
-Visualize motion vectors (MVs).
-
-This option is deprecated, see the codecview filter instead.
-
-Possible values:
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-cmp integer (encoding,video )
-Set full pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-subcmp integer (encoding,video )
-Set sub pel me compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-mbcmp integer (encoding,video )
-Set macroblock compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-ildctcmp integer (encoding,video )
-Set interlaced dct compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-dia_size integer (encoding,video )
-Set diamond type & size for motion estimation.
-
-
-last_pred integer (encoding,video )
-Set amount of motion predictors from the previous frame.
-
-
-preme integer (encoding,video )
-Set pre motion estimation.
-
-
-precmp integer (encoding,video )
-Set pre motion estimation compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-pre_dia_size integer (encoding,video )
-Set diamond type & size for motion estimation pre-pass.
-
-
-subq integer (encoding,video )
-Set sub pel motion estimation quality.
-
-
-dtg_active_format integer
-me_range integer (encoding,video )
-Set limit on motion vector range (1023 for DivX player).
-
-
-ibias integer (encoding,video )
-Set intra quant bias.
-
-
-pbias integer (encoding,video )
-Set inter quant bias.
-
-
-color_table_id integer
-global_quality integer (encoding,audio,video )
-coder integer (encoding,video )
-
-Possible values:
-
-‘vlc ’
-variable length coder / huffman coder
-
-‘ac ’
-arithmetic coder
-
-‘raw ’
-raw (no encoding)
-
-‘rle ’
-run-length coder
-
-‘deflate ’
-deflate-based coder
-
-
-
-
-context integer (encoding,video )
-Set context model.
-
-
-slice_flags integer
-xvmc_acceleration integer
-mbd integer (encoding,video )
-Set macroblock decision algorithm (high quality mode).
-
-Possible values:
-
-‘simple ’
-use mbcmp (default)
-
-‘bits ’
-use fewest bits
-
-‘rd ’
-use best rate distortion
-
-
-
-
-stream_codec_tag integer
-sc_threshold integer (encoding,video )
-Set scene change threshold.
-
-
-lmin integer (encoding,video )
-Set min lagrange factor (VBR).
-
-
-lmax integer (encoding,video )
-Set max lagrange factor (VBR).
-
-
-nr integer (encoding,video )
-Set noise reduction.
-
-
-rc_init_occupancy integer (encoding,video )
-Set number of bits which should be loaded into the rc buffer before
-decoding starts.
-
-
-flags2 flags (decoding/encoding,audio,video )
-
-Possible values:
-
-‘fast ’
-Allow non-spec-compliant speedup tricks.
-
-‘sgop ’
-Deprecated, use mpegvideo private options instead.
-
-‘noout ’
-Skip bitstream encoding.
-
-‘ignorecrop ’
-Ignore cropping information from sps.
-
-‘local_header ’
-Place global headers at every keyframe instead of in extradata.
-
-‘chunks ’
-Frame data might be split into multiple chunks.
-
-‘showall ’
-Show all frames before the first keyframe.
-
-‘skiprd ’
-Deprecated, use mpegvideo private options instead.
-
-‘export_mvs ’
-Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
-for codecs that support it. See also doc/examples/export_mvs.c .
-
-
-
-
-error integer (encoding,video )
-qns integer (encoding,video )
-Deprecated, use mpegvideo private options instead.
-
-
-threads integer (decoding/encoding,video )
-
-Possible values:
-
-‘auto ’
-detect a good number of threads
-
-
-
-
-me_threshold integer (encoding,video )
-Set motion estimation threshold.
-
-
-mb_threshold integer (encoding,video )
-Set macroblock threshold.
-
-
-dc integer (encoding,video )
-Set intra_dc_precision.
-
-
-nssew integer (encoding,video )
-Set nsse weight.
-
-
-skip_top integer (decoding,video )
-Set number of macroblock rows at the top which are skipped.
-
-
-skip_bottom integer (decoding,video )
-Set number of macroblock rows at the bottom which are skipped.
-
-
-profile integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-‘aac_main ’
-‘aac_low ’
-‘aac_ssr ’
-‘aac_ltp ’
-‘aac_he ’
-‘aac_he_v2 ’
-‘aac_ld ’
-‘aac_eld ’
-‘mpeg2_aac_low ’
-‘mpeg2_aac_he ’
-‘mpeg4_sp ’
-‘mpeg4_core ’
-‘mpeg4_main ’
-‘mpeg4_asp ’
-‘dts ’
-‘dts_es ’
-‘dts_96_24 ’
-‘dts_hd_hra ’
-‘dts_hd_ma ’
-
-
-
-level integer (encoding,audio,video )
-
-Possible values:
-
-‘unknown ’
-
-
-
-lowres integer (decoding,audio,video )
-Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
-
-
-skip_threshold integer (encoding,video )
-Set frame skip threshold.
-
-
-skip_factor integer (encoding,video )
-Set frame skip factor.
-
-
-skip_exp integer (encoding,video )
-Set frame skip exponent.
-Negative values behave identically to the corresponding positive ones, except
-that the score is normalized.
-Positive values exist primarily for compatibility reasons and are not so useful.
-
-
-skipcmp integer (encoding,video )
-Set frame skip compare function.
-
-Possible values:
-
-‘sad ’
-sum of absolute differences, fast (default)
-
-‘sse ’
-sum of squared errors
-
-‘satd ’
-sum of absolute Hadamard transformed differences
-
-‘dct ’
-sum of absolute DCT transformed differences
-
-‘psnr ’
-sum of squared quantization errors (avoid, low quality)
-
-‘bit ’
-number of bits needed for the block
-
-‘rd ’
-rate distortion optimal, slow
-
-‘zero ’
-0
-
-‘vsad ’
-sum of absolute vertical differences
-
-‘vsse ’
-sum of squared vertical differences
-
-‘nsse ’
-noise preserving sum of squared differences
-
-‘w53 ’
-5/3 wavelet, only used in snow
-
-‘w97 ’
-9/7 wavelet, only used in snow
-
-‘dctmax ’
-‘chroma ’
-
-
-
-border_mask float (encoding,video )
-Increase the quantizer for macroblocks close to borders.
-
-
-mblmin integer (encoding,video )
-Set min macroblock lagrange factor (VBR).
-
-
-mblmax integer (encoding,video )
-Set max macroblock lagrange factor (VBR).
-
-
-mepc integer (encoding,video )
-Set motion estimation bitrate penalty compensation (1.0 = 256).
-
-
-skip_loop_filter integer (decoding,video )
-skip_idct integer (decoding,video )
-skip_frame integer (decoding,video )
-
-Make decoder discard processing depending on the frame type selected
-by the option value.
-
-skip_loop_filter skips frame loop filtering, skip_idct
-skips frame IDCT/dequantization, skip_frame skips decoding.
-
-Possible values:
-
-‘none ’
-Discard no frame.
-
-
-‘default ’
-Discard useless frames like 0-sized frames.
-
-
-‘noref ’
-Discard all non-reference frames.
-
-
-‘bidir ’
-Discard all bidirectional frames.
-
-
-‘nokey ’
-Discard all frames except keyframes.
-
-
-‘all ’
-Discard all frames.
-
-
-
-Default value is ‘default ’.
-
-
-bidir_refine integer (encoding,video )
-Refine the two motion vectors used in bidirectional macroblocks.
-
-
-brd_scale integer (encoding,video )
-Downscale frames for dynamic B-frame decision.
-
-
-keyint_min integer (encoding,video )
-Set minimum interval between IDR-frames.
-
-
-refs integer (encoding,video )
-Set reference frames to consider for motion compensation.
-
-
-chromaoffset integer (encoding,video )
-Set chroma qp offset from luma.
-
-
-trellis integer (encoding,audio,video )
-Set rate-distortion optimal quantization.
-
-
-sc_factor integer (encoding,video )
-Set value multiplied by qscale for each frame and added to
-scene_change_score.
-
-
-mv0_threshold integer (encoding,video )
-b_sensitivity integer (encoding,video )
-Adjust sensitivity of b_frame_strategy 1.
-
-
-compression_level integer (encoding,audio,video )
-min_prediction_order integer (encoding,audio )
-max_prediction_order integer (encoding,audio )
-timecode_frame_start integer (encoding,video )
-Set GOP timecode frame start number, in non-drop-frame format.
-
-
-request_channels integer (decoding,audio )
-Set desired number of audio channels.
-
-
-bits_per_raw_sample integer
-channel_layout integer (decoding/encoding,audio )
-
-Possible values:
-
-request_channel_layout integer (decoding,audio )
-
-Possible values:
-
-rc_max_vbv_use float (encoding,video )
-rc_min_vbv_use float (encoding,video )
-ticks_per_frame integer (decoding/encoding,audio,video )
-color_primaries integer (decoding/encoding,video )
-color_trc integer (decoding/encoding,video )
-colorspace integer (decoding/encoding,video )
-color_range integer (decoding/encoding,video )
-chroma_sample_location integer (decoding/encoding,video )
-log_level_offset integer
-Set the log level offset.
-
-
-slices integer (encoding,video )
-Number of slices, used in parallelized encoding.
-
-
-thread_type flags (decoding/encoding,video )
-Select which multithreading methods to use.
-
-Use of ‘frame ’ will increase decoding delay by one frame per
-thread, so clients which cannot provide future frames should not use
-it.
-
-Possible values:
-
-‘slice ’
-Decode more than one part of a single frame at once.
-
-Multithreading using slices works only when the video was encoded with
-slices.
-
-
-‘frame ’
-Decode more than one frame at once.
-
-
-
-Default value is ‘slice+frame ’.
-
-
-audio_service_type integer (encoding,audio )
-Set audio service type.
-
-Possible values:
-
-‘ma ’
-Main Audio Service
-
-‘ef ’
-Effects
-
-‘vi ’
-Visually Impaired
-
-‘hi ’
-Hearing Impaired
-
-‘di ’
-Dialogue
-
-‘co ’
-Commentary
-
-‘em ’
-Emergency
-
-‘vo ’
-Voice Over
-
-‘ka ’
-Karaoke
-
-
-
-
-request_sample_fmt sample_fmt (decoding,audio )
-Set sample format audio decoders should prefer. Default value is
-none
.
-
-
-pkt_timebase rational number
-sub_charenc encoding (decoding,subtitles )
-Set the input subtitles character encoding.
-
-
-field_order field_order (video )
-Set/override the field order of the video.
-Possible values:
-
-‘progressive ’
-Progressive video
-
-‘tt ’
-Interlaced video, top field coded and displayed first
-
-‘bb ’
-Interlaced video, bottom field coded and displayed first
-
-‘tb ’
-Interlaced video, top coded first, bottom displayed first
-
-‘bt ’
-Interlaced video, bottom coded first, top displayed first
-
-
-
-
-skip_alpha integer (decoding,video )
-Set to 1 to disable processing alpha (transparency). This works like the
-‘gray ’ flag in the flags option which skips chroma information
-instead of alpha. Default is 0.
-
-
-codec_whitelist list (input )
-"," separated List of allowed decoders. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
-
-
-
10 Decoders# TOC
-
-
Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-
When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding --enable-lib
option. You can list all
-available decoders using the configure option --list-decoders
.
-
-
You can disable all the decoders with the configure option
---disable-decoders
and selectively enable / disable single decoders
-with the options --enable-decoder=DECODER
/
---disable-decoder=DECODER
.
-
-
The option -decoders
of the ff* tools will display the list of
-enabled decoders.
-
-
-
-
11 Video Decoders# TOC
-
-
A description of some of the currently available video decoders
-follows.
-
-
-
11.1 rawvideo# TOC
-
-
Raw video decoder.
-
-
This decoder decodes rawvideo streams.
-
-
-
11.1.1 Options# TOC
-
-
-top top_field_first
-Specify the assumed field type of the input video.
-
--1
-the video is assumed to be progressive (default)
-
-0
-bottom-field-first is assumed
-
-1
-top-field-first is assumed
-
-
-
-
-
-
-
-
-
12 Audio Decoders# TOC
-
-
A description of some of the currently available audio decoders
-follows.
-
-
-
12.1 ac3# TOC
-
-
AC-3 audio decoder.
-
-
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
-the undocumented RealAudio 3 (a.k.a. dnet).
-
-
-
12.1.1 AC-3 Decoder Options# TOC
-
-
--drc_scale value
-Dynamic Range Scale Factor. The factor to apply to dynamic range values
-from the AC-3 stream. This factor is applied exponentially.
-There are 3 notable scale factor ranges:
-
-drc_scale == 0
-DRC disabled. Produces full range audio.
-
-0 < drc_scale <= 1
-DRC enabled. Applies a fraction of the stream DRC value.
-Audio reproduction is between full range and full compression.
-
-drc_scale > 1
-DRC enabled. Applies drc_scale asymmetrically.
-Loud sounds are fully compressed. Soft sounds are enhanced.
-
-
-
-
-
-
-
-
12.2 ffwavesynth# TOC
-
-
Internal wave synthesizer.
-
-
This decoder generates wave patterns according to predefined sequences. Its
-use is purely internal and the format of the data it accepts is not publicly
-documented.
-
-
-
12.3 libcelt# TOC
-
-
libcelt decoder wrapper.
-
-
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
-Requires the presence of the libcelt headers and library during configuration.
-You need to explicitly configure the build with --enable-libcelt
.
-
-
-
12.4 libgsm# TOC
-
-
libgsm decoder wrapper.
-
-
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
-the presence of the libgsm headers and library during configuration. You need
-to explicitly configure the build with --enable-libgsm
.
-
-
This decoder supports both the ordinary GSM and the Microsoft variant.
-
-
-
12.5 libilbc# TOC
-
-
libilbc decoder wrapper.
-
-
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
-audio codec. Requires the presence of the libilbc headers and library during
-configuration. You need to explicitly configure the build with
---enable-libilbc
.
-
-
-
12.5.1 Options# TOC
-
-
The following option is supported by the libilbc wrapper.
-
-
-enhance
-
-Enable the enhancement of the decoded audio when set to 1. The default
-value is 0 (disabled).
-
-
-
-
-
-
12.6 libopencore-amrnb# TOC
-
-
libopencore-amrnb decoder wrapper.
-
-
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
-Narrowband audio codec. Using it requires the presence of the
-libopencore-amrnb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrnb
.
-
-
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
-without this library.
-
-
-
12.7 libopencore-amrwb# TOC
-
-
libopencore-amrwb decoder wrapper.
-
-
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
-Wideband audio codec. Using it requires the presence of the
-libopencore-amrwb headers and library during configuration. You need to
-explicitly configure the build with --enable-libopencore-amrwb
.
-
-
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
-without this library.
-
-
-
12.8 libopus# TOC
-
-
libopus decoder wrapper.
-
-
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
-Requires the presence of the libopus headers and library during
-configuration. You need to explicitly configure the build with
---enable-libopus
.
-
-
An FFmpeg native decoder for Opus exists, so users can decode Opus
-without this library.
-
-
-
-
13 Subtitles Decoders# TOC
-
-
-
13.1 dvdsub# TOC
-
-
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
-also be found in VobSub file pairs and in some Matroska files.
-
-
-
13.1.1 Options# TOC
-
-
-palette
-Specify the global palette used by the bitmaps. When stored in VobSub, the
-palette is normally specified in the index file; in Matroska, the palette is
-stored in the codec extra-data in the same format as in VobSub. In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example 0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
-
-
-ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-
-forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to 1
-will only keep the forced subtitles. Default value is 0
.
-
-
-
-
-
13.2 libzvbi-teletext# TOC
-
-
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
---enable-libzvbi
.
-
-
-
13.2.1 Options# TOC
-
-
-txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-
-txt_chop_top
-Discards the top teletext line. Default value is 1.
-
-txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text, you should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext based subtitles if
-your application can handle simple text based subtitles. Default value is
-bitmap.
-
-txt_left
-X offset of generated bitmaps, default is 0.
-
-txt_top
-Y offset of generated bitmaps, default is 0.
-
-txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext based subtitles where empty spaces may
-be present at the start or at the end of the lines or empty lines may be
-present between the subtitle lines because of double-sized teletext characters.
-Default value is 1.
-
-txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-
-txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0 which means an opaque (black) background.
-
-
-
-
-
14 Bitstream Filters# TOC
-
-
When you configure your FFmpeg build, all the supported bitstream
-filters are enabled by default. You can list all available ones using
-the configure option --list-bsfs
.
-
-
You can disable all the bitstream filters using the configure option
---disable-bsfs
, and selectively enable any bitstream filter using
-the option --enable-bsf=BSF
, or you can disable a particular
-bitstream filter using the option --disable-bsf=BSF
.
-
-
The option -bsfs
of the ff* tools will display the list of
-all the supported bitstream filters included in your build.
-
-
The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a ’=’.
-
-
-
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-
-
-
Below is a description of the currently available bitstream filters,
-with their parameters, if any.
-
-
-
14.1 aac_adtstoasc# TOC
-
-
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
-bitstream filter.
-
-
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
-ADTS header and removes the ADTS header.
-
-
This is required for example when copying an AAC stream from a raw
-ADTS AAC container to a FLV or a MOV/MP4 file.
-
-
-
14.2 chomp# TOC
-
-
Remove zero padding at the end of a packet.
-
-
-
14.3 dump_extra# TOC
-
-
Add extradata to the beginning of the filtered packets.
-
-
The additional argument specifies which packets should be filtered.
-It accepts the values:
-
-‘a ’
-add extradata to all key packets, but only if local_header is
-set in the flags2 codec context field
-
-
-‘k ’
-add extradata to all key packets
-
-
-‘e ’
-add extradata to all packets
-
-
-
-
If not specified it is assumed ‘k ’.
-
-
For example the following ffmpeg
command forces a global
-header (thus disabling individual packet headers) in the H.264 packets
-generated by the libx264
encoder, but corrects them by adding
-the header stored in extradata to the key packets:
-
-
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
-
-
-
-
14.4 h264_mp4toannexb# TOC
-
-
Convert an H.264 bitstream from length prefixed mode to start code
-prefixed mode (as defined in the Annex B of the ITU-T H.264
-specification).
-
-
This is required by some streaming formats, typically the MPEG-2
-transport stream format ("mpegts").
-
-
For example to remux an MP4 file containing an H.264 stream to mpegts
-format with ffmpeg
, you can use the command:
-
-
-
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
-
-
-
-
14.5 imxdump# TOC
-
-
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
-Pro decoder. This filter only applies to the mpeg2video codec, and is
-likely not needed for Final Cut Pro 7 and newer with the appropriate
--tag:v .
-
-
For example, to remux 30 MB/sec NTSC IMX to MOV:
-
-
-
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
-
-
-
-
14.6 mjpeg2jpeg# TOC
-
-
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
-
-
MJPEG is a video codec wherein each video frame is essentially a
-JPEG image. The individual frames can be extracted without loss,
-e.g. by
-
-
-
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
-
-
-
Unfortunately, these chunks are incomplete JPEG images, because
-they lack the DHT segment required for decoding. Quoting from
-http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
-
-
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
-commented that "MJPEG, or at least the MJPEG in AVIs having the
-MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
-Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
-and it must use basic Huffman encoding, not arithmetic or
-progressive. . . . You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won’t have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-
This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-
-
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-
-
-
-
14.7 mjpega_dump_header# TOC
-
-
-
14.8 movsub# TOC
-
-
-
14.9 mp3_header_decompress# TOC
-
-
-
14.10 noise# TOC
-
-
Damages the contents of packets without damaging the container. Can be
-used for fuzzing or testing error resilience/concealment.
-
-
Parameters:
-A numeral string, whose value is related to how often output bytes will
-be modified. Values below or equal to 0 are forbidden; the lower the
-value, the more frequently bytes will be modified, with 1 meaning
-every byte is modified.
-
-
-
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-
-
applies the modification to every byte.
-
-
-
14.11 remove_extra# TOC
-
-
-
15 Format Options# TOC
-
-
The libavformat library provides some generic global options, which
-can be set on all the muxers and demuxers. In addition each muxer or
-demuxer may support so-called private options, which are specific for
-that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
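A minimal sketch of passing format options programmatically through an
options dictionary when opening an input; the file name and option
values are arbitrary placeholders:

/* Sketch: pass format options via an AVDictionary to avformat_open_input(). */
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main(void)
{
    AVFormatContext *fmt = NULL;
    AVDictionary *opts = NULL;

    av_register_all();

    /* Same effect as "-probesize 10M -fflags +genpts" on the command line. */
    av_dict_set(&opts, "probesize", "10M", 0);
    av_dict_set(&opts, "fflags", "+genpts", 0);

    if (avformat_open_input(&fmt, "input.mkv", NULL, &opts) < 0) {
        av_dict_free(&opts);
        return 1;
    }
    /* Entries still left in opts were not recognized by the demuxer. */
    if (av_dict_count(opts))
        fprintf(stderr, "%d option(s) were not recognized\n", av_dict_count(opts));

    av_dict_free(&opts);
    avformat_close_input(&fmt);
    return 0;
}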
-
The list of supported options follows:
-
-
-avioflags flags (input/output )
-Possible values:
-
-‘direct ’
-Reduce buffering.
-
-
-
-
-probesize integer (input )
-Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value allows more information to be
-detected in case it is dispersed throughout the stream, but increases
-latency. Must be an integer not less than 32. The default is 5000000
-(see the combined example after this list).
-
-
-packetsize integer (output )
-Set packet size.
-
-
-fflags flags (input/output )
-Set format flags.
-
-Possible values:
-
-‘ignidx ’
-Ignore index.
-
-‘genpts ’
-Generate PTS.
-
-‘nofillin ’
-Do not fill in missing values that can be exactly calculated.
-
-‘noparse ’
-Disable AVParsers, this needs +nofillin
too.
-
-‘igndts ’
-Ignore DTS.
-
-‘discardcorrupt ’
-Discard corrupted frames.
-
-‘sortdts ’
-Try to interleave output packets by DTS.
-
-‘keepside ’
-Do not merge side data.
-
-‘latm ’
-Enable RTP MP4A-LATM payload.
-
-‘nobuffer ’
-Reduce the latency introduced by optional buffering
-
-‘bitexact ’
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
-
-
-
-
-seek2any integer (input )
-Allow seeking to non-keyframes on demuxer level when supported if set to 1.
-Default is 0.
-
-
-analyzeduration integer (input )
-Specify how many microseconds are analyzed to probe the input. A
-higher value allows more accurate information to be detected, but
-increases latency. It defaults to 5,000,000 microseconds = 5 seconds.
-
-
-cryptokey hexadecimal string (input )
-Set decryption key.
-
-
-indexmem integer (input )
-Set max memory used for timestamp index (per stream).
-
-
-rtbufsize integer (input )
-Set max memory used for buffering real-time frames.
-
-
-fdebug flags (input/output )
-Print specific debug info.
-
-Possible values:
-
-‘ts ’
-
-
-
-max_delay integer (input/output )
-Set maximum muxing or demuxing delay in microseconds.
-
-
-fpsprobesize integer (input )
-Set number of frames used to probe fps.
-
-
-audio_preload integer (output )
-Set microseconds by which audio packets should be interleaved earlier.
-
-
-chunk_duration integer (output )
-Set microseconds for each chunk.
-
-
-chunk_size integer (output )
-Set size in bytes for each chunk.
-
-
-err_detect, f_err_detect flags (input )
-Set error detection flags. f_err_detect
is deprecated and
-should be used only via the ffmpeg
tool.
-
-Possible values:
-
-‘crccheck ’
-Verify embedded CRCs.
-
-‘bitstream ’
-Detect bitstream specification deviations.
-
-‘buffer ’
-Detect improper bitstream length.
-
-‘explode ’
-Abort decoding on minor error detection.
-
-‘careful ’
-Consider things that violate the spec and have not been seen in the
-wild as errors.
-
-‘compliant ’
-Consider all spec non-compliances as errors.
-
-‘aggressive ’
-Consider things that a sane encoder should not do as an error.
-
-
-
-
-use_wallclock_as_timestamps integer (input )
-Use wallclock as timestamps.
-
-
-avoid_negative_ts integer (output )
-
-Possible values:
-
-‘make_non_negative ’
-Shift timestamps to make them non-negative.
-Also note that this affects only leading negative timestamps, and not
-non-monotonic negative timestamps.
-
-‘make_zero ’
-Shift timestamps so that the first timestamp is 0.
-
-‘auto (default) ’
-Enables shifting when required by the target format.
-
-‘disabled ’
-Disables shifting of timestamp.
-
-
-
-When shifting is enabled, all output timestamps are shifted by the
-same amount. Audio, video, and subtitles desynching and relative
-timestamp differences are preserved compared to how they would have
-been without shifting.
-
-
-skip_initial_bytes integer (input )
-Set the number of bytes to skip before reading the header and frames.
-Default is 0.
-
-
-correct_ts_overflow integer (input )
-Correct single timestamp overflows if set to 1. Default is 1.
-
-
-flush_packets integer (output )
-Flush the underlying I/O stream after each packet. Default 1 enables it, and
-has the effect of reducing the latency; 0 disables it and may slightly
-increase performance in some cases.
-
-
-output_ts_offset offset (output )
-Set the output time offset.
-
-offset must be a time duration specification,
-see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-The offset is added by the muxer to the output timestamps.
-
-Specifying a positive offset means that the corresponding streams are
-delayed by the time duration specified in offset . Default value
-is 0
(meaning that no offset is applied).
-
-
-format_whitelist list (input )
-"," separated List of allowed demuxers. By default all are allowed.
-
-
-dump_separator string (input )
-Separator used to separate the fields printed on the command line about the
-stream parameters.
-For example, to separate the fields with newlines and indentation:
-
-
ffprobe -dump_separator "
- " -i ~/videos/matrixbench_mpeg2.mpg
-
-
-
-
-
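-
As a combined illustration of several of these options (a sketch only: the
-file names and option values are placeholders), the following command raises
-the probing limits for an input whose streams are detected late, generates
-missing PTS values and shifts the output timestamps so they start at zero:
-
-
ffmpeg -probesize 50M -analyzeduration 10M -fflags +genpts -i input.ts -c copy -avoid_negative_ts make_zero output.mkv
-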
-
-
15.1 Format stream specifiers# TOC
-
-
Format stream specifiers allow selection of one or more streams that
-match specific properties.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index.
-
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio,
-’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
-stream_index is given, then it matches the stream number
-stream_index of this type. Otherwise, it matches all streams of
-this type.
-
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number
-stream_index in the program with the id
-program_id . Otherwise, it matches all streams in the program.
-
-
-#stream_id
-Matches the stream by a format-specific ID.
-
-
-
-
The exact semantics of stream specifiers is defined by the
-avformat_match_stream_specifier()
function declared in the
-libavformat/avformat.h header.
-
-
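-
For example, the -map option of the ffmpeg tool accepts these specifiers;
-the following sketch (file names are placeholders) copies the first video
-stream and the second audio stream of the input:
-
-
ffmpeg -i input.mkv -map 0:v:0 -map 0:a:1 -c copy output.mkv
-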
-
16 Demuxers# TOC
-
-
Demuxers are configured elements in FFmpeg that can read the
-multimedia streams from a particular type of file.
-
-
When you configure your FFmpeg build, all the supported demuxers
-are enabled by default. You can list all available ones using the
-configure option --list-demuxers
.
-
-
You can disable all the demuxers using the configure option
---disable-demuxers
, and selectively enable a single demuxer with
-the option --enable-demuxer=DEMUXER
, or disable it
-with the option --disable-demuxer=DEMUXER
.
-
-
The option -formats
of the ff* tools will display the list of
-enabled demuxers.
-
-
The description of some of the currently available demuxers follows.
-
-
-
16.1 applehttp# TOC
-
-
Apple HTTP Live Streaming demuxer.
-
-
This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-
-
16.2 apng# TOC
-
-
Animated Portable Network Graphics demuxer.
-
-
This demuxer is used to demux APNG files.
-All headers, but the PNG signature, up to (but not including) the first
-fcTL chunk are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-
--ignore_loop bool
-Ignore the loop variable in the file if set.
-
--max_fps int
-Maximum framerate in frames per second (0 for no limit).
-
--default_fps int
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-
-
-
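-
For example, to convert an APNG animation to a video file while honoring the
-loop information stored in it (file names are placeholders; -t bounds the
-output in case the animation loops forever):
-
-
ffmpeg -ignore_loop 0 -i input.apng -t 10 output.mp4
-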
-
-
16.3 asf# TOC
-
-
Advanced Systems Format demuxer.
-
-
This demuxer is used to demux ASF files and MMS network streams.
-
-
--no_resync_search bool
-Do not try to resynchronize by looking for a certain optional start code.
-
-
-
-
-
16.4 concat# TOC
-
-
Virtual concatenation script demuxer.
-
-
This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-
The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that it is
-done globally and may cause gaps if all streams do not have exactly the same
-length.
-
-
All files must have the same streams (same codecs, same time base, etc.).
-
-
The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-duration
directive can be used to override the duration stored in
-each file.
-
-
-
16.4.1 Syntax# TOC
-
-
The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with ’#’ are ignored. The
-following directive is recognized:
-
-
-file path
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent file-related directives apply to that file.
-
-
-ffconcat version 1.0
-Identify the script type and version. It also sets the safe option
-to 1 if it was set to its default -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order mark) on the very first
-line of the script.
-
-
-duration dur
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-
-stream
-Introduce a stream in the virtual file.
-All subsequent stream-related directives apply to the last introduced
-stream.
-Some streams properties must be set in order to allow identifying the
-matching streams in the subfiles.
-If no streams are defined in the script, the streams from the first file are
-copied.
-
-
-exact_stream_id id
-Set the id of the stream.
-If this directive is given, the string with the corresponding id in the
-subfiles will be used.
-This is especially useful for MPEG-PS (VOB) files, where the order of the
-streams is not reliable.
-
-
-
-
-
-
16.4.2 Options# TOC
-
-
This demuxer accepts the following option:
-
-
-safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification, is relative, and all components
-contain only characters from the portable character set (letters, digits,
-period, underscore and hyphen) and do not begin with a period.
-
-If set to 0, any file name is accepted.
-
-The default is -1; it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-
-auto_convert
-If set to 1, try to perform automatic conversions on packet data to make the
-streams concatenable.
-
-Currently, the only conversion is adding the h264_mp4toannexb bitstream
-filter to H.264 streams in MP4 format. This is necessary in particular if
-there are resolution changes.
-
-
-
-
-
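-
A minimal usage sketch (file names are placeholders and both parts are
-assumed to contain identical streams): given a script list.ffconcat
-containing
-
-
ffconcat version 1.0
-file intro.mkv
-file main.mkv
-
-
the parts can be concatenated without re-encoding with:
-
-
ffmpeg -f concat -i list.ffconcat -c copy output.mkv
-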
-
16.5 flv# TOC
-
-
Adobe Flash Video Format demuxer.
-
-
This demuxer is used to demux FLV files and RTMP network streams.
-
-
--flv_metadata bool
-Allocate the streams according to the onMetaData array content.
-
-
-
-
-
16.6 libgme# TOC
-
-
The Game Music Emu library is a collection of video game music file emulators.
-
-
See http://code.google.com/p/game-music-emu/ for more information.
-
-
Some files have multiple tracks. The demuxer will pick the first track by
-default. The track_index option can be used to select a different
-track. Track indexes start at 0. The demuxer exports the number of tracks as
-the tracks metadata entry.
-
-
For very large files, the max_size option may have to be adjusted.
-
-
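-
For example, to extract the third track of a game music file (the file name
-is a placeholder; -t bounds the output because such tracks often loop
-forever):
-
-
ffmpeg -track_index 2 -i music.nsf -t 180 track3.flac
-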
-
16.7 libquvi# TOC
-
-
Play media from Internet services using the quvi project.
-
-
The demuxer accepts a format option to request a specific quality. It
-is by default set to best .
-
-
See http://quvi.sourceforge.net/ for more information.
-
-
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
-enabled.
-
-
-
16.8 gif# TOC
-
-
Animated GIF demuxer.
-
-
It accepts the following options:
-
-
-min_delay
-Set the minimum valid delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 2.
-
-
-default_delay
-Set the default delay between frames in hundredths of seconds.
-Range is 0 to 6000. Default value is 10.
-
-
-ignore_loop
-GIF files can contain information to loop a certain number of times (or
-infinitely). If ignore_loop is set to 1, then the loop setting
-from the input will be ignored and looping will not occur. If set to 0,
-then looping will occur and will cycle the number of times according to
-the GIF. Default value is 1.
-
-
-
-
For example, with the overlay filter, place an infinitely looping GIF
-over another video:
-
-
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
-
-
-
Note that in the above example the shortest option for overlay filter is
-used to end the output video at the length of the shortest input file,
-which in this case is input.mp4 as the GIF in this example loops
-infinitely.
-
-
-
16.9 image2# TOC
-
-
Image file demuxer.
-
-
This demuxer reads from a list of image files specified by a pattern.
-The syntax and meaning of the pattern is specified by the
-option pattern_type .
-
-
The pattern may contain a suffix which is used to automatically
-determine the format of the images contained in the files.
-
-
The size, the pixel format, and the format of each image must be the
-same for all the files in the sequence.
-
-
This demuxer accepts the following options:
-
-framerate
-Set the frame rate for the video stream. It defaults to 25.
-
-loop
-If set to 1, loop over the input. Default value is 0.
-
-pattern_type
-Select the pattern type used to interpret the provided filename.
-
-pattern_type accepts one of the following values.
-
-sequence
-Select a sequence pattern type, used to specify a sequence of files
-indexed by sequential numbers.
-
-A sequence pattern may contain the string "%d" or "%0Nd", which
-specifies the position of the characters representing a sequential
-number in each filename matched by the pattern. If the form
-"%0Nd" is used, the string representing the number in each
-filename is 0-padded and N is the total number of 0-padded
-digits representing the number. The literal character ’%’ can be
-specified in the pattern with the string "%%".
-
-If the sequence pattern contains "%d" or "%0Nd", the first filename of
-the file list specified by the pattern must contain a number
-inclusively contained between start_number and
-start_number +start_number_range -1, and all the following
-numbers must be sequential.
-
-For example the pattern "img-%03d.bmp" will match a sequence of
-filenames of the form img-001.bmp , img-002.bmp , ...,
-img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
-sequence of filenames of the form i%m%g-1.jpg ,
-i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
-
-Note that the pattern does not necessarily have to contain "%d" or
-"%0Nd"; for example, to convert a single image file
-img.jpeg you can employ the command:
-
-
ffmpeg -i img.jpeg img.png
-
-
-
-glob
-Select a glob wildcard pattern type.
-
-The pattern is interpreted like a glob()
pattern. This is only
-selectable if libavformat was compiled with globbing support.
-
-
-glob_sequence (deprecated, will be removed)
-Select a mixed glob wildcard/sequence pattern.
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-%*?[]{}
that is preceded by an unescaped "%", the pattern is
-interpreted like a glob()
pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters %*?[]{}
must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example the pattern foo-%*.jpeg
will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-foo-%?%?%?.jpeg
will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of glob and
-sequence .
-
-
-
-Default value is glob_sequence .
-
-pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-
-start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-
-start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from start_number . Default value
-is 5.
-
-ts_from_file
-If set to 1, will set frame timestamp to modification time of image file. Note
-that monotonicity of timestamps is not guaranteed: images go in the same order as
-without this option. Default value is 0.
-If set to 2, will set frame timestamp to the modification time of the image file in
-nanosecond precision.
-
-video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-
-
-
-
-
16.9.1 Examples# TOC
-
-
- Use ffmpeg
for creating a video from the images in the file
-sequence img-001.jpeg , img-002.jpeg , ..., assuming an
-input frame rate of 10 frames per second:
-
-
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-
-
- As above, but start by reading from a file with index 100 in the sequence:
-
-
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-
-
- Read images matching the "*.png" glob pattern , that is all the files
-terminating with the ".png" suffix:
-
-
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-
-
-
-
-
16.10 mpegts# TOC
-
-
MPEG-2 transport stream demuxer.
-
-
-fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1, set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-
-
-
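-
For example, to keep the teletext PTS and DTS values exactly as they appear
-in the input (a sketch only; file names are placeholders):
-
-
ffmpeg -fix_teletext_pts 0 -i input.ts -c copy output.ts
-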
-
-
16.11 rawvideo# TOC
-
-
Raw video demuxer.
-
-
This demuxer allows one to read raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-
This demuxer accepts the following options:
-
-framerate
-Set input video frame rate. Default value is 25.
-
-
-pixel_format
-Set the input video pixel format. Default value is yuv420p
.
-
-
-video_size
-Set the input video size. This value must be specified explicitly.
-
-
-
-
For example to read a rawvideo file input.raw with
-ffplay
, assuming a pixel format of rgb24
, a video
-size of 320x240
, and a frame rate of 10 images per second, use
-the command:
-
-
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
-
-
-
-
16.12 sbg# TOC
-
-
SBaGen script demuxer.
-
-
This demuxer reads the script language used by SBaGen
-http://uazu.net/sbagen/ to generate binaural beats sessions. An SBG
-script looks like this:
-
-
-SE
-a: 300-2.5/3 440+4.5/0
-b: 300-2.5/0 440+4.5/3
-off: -
-NOW == a
-+0:07:00 == b
-+0:14:00 == a
-+0:21:00 == b
-+0:30:00 off
-
-
-
An SBG script can mix absolute and relative timestamps. If the script uses
-either only absolute timestamps (including the script start time) or only
-relative ones, then its layout is fixed, and the conversion is
-straightforward. On the other hand, if the script mixes both kinds of
-timestamps, then the NOW reference for relative timestamps will be
-taken from the current time of day at the time the script is read, and the
-script layout will be frozen according to that reference. That means that if
-the script is directly played, the actual times will match the absolute
-timestamps up to the sound controller’s clock accuracy, but if the user
-somehow pauses the playback or seeks, all times will be shifted accordingly.
-
-
-
16.13 tedcaptions# TOC
-
-
JSON captions used for TED Talks .
-
-
TED does not provide links to the captions, but they can be guessed from the
-page. The file tools/bookmarklets.html from the FFmpeg source tree
-contains a bookmarklet to expose them.
-
-
This demuxer accepts the following option:
-
-start_time
-Set the start time of the TED talk, in milliseconds. The default is 15000
-(15s). It is used to sync the captions with the downloadable videos, because
-they include a 15s intro.
-
-
-
-
Example: convert the captions to a format most players understand:
-
-
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
-
-
-
-
17 Metadata# TOC
-
-
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
-INI-like text file and then load it back using the metadata muxer/demuxer.
-
-
The file format is as follows:
-
- A file consists of a header and a number of metadata tags divided into sections,
-each on its own line.
-
- The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
-
- Metadata tags are of the form ’key=value’
-
- Immediately after header follows global metadata
-
- After global metadata there may be sections with per-stream/per-chapter
-metadata.
-
- A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
-brackets (’[’, ’]’) and ends with next section or end of file.
-
- At the beginning of a chapter section there may be an optional timebase to be
-used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
-den are integers. If the timebase is missing then start/end times are assumed to
-be in milliseconds.
-Next a chapter section must contain chapter start and end times in form
-’START=num’, ’END=num’, where num is a positive integer.
-
- Empty lines and lines starting with ’;’ or ’#’ are ignored.
-
- Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
-newline) must be escaped with a backslash ’\’.
-
- Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
-the tag (in the example above key is ’foo ’, value is ’ bar’).
-
-
-
A ffmetadata file might look like this:
-
-
;FFMETADATA1
-title=bike\\shed
-;this is a comment
-artist=FFmpeg troll team
-
-[CHAPTER]
-TIMEBASE=1/1000
-START=0
-#chapter ends at 0:01:00
-END=60000
-title=chapter \#1
-[STREAM]
-title=multi\
-line
-
-
-
By using the ffmetadata muxer and demuxer it is possible to extract
-metadata from an input file to an ffmetadata file, and then transcode
-the file into an output file with the edited ffmetadata file.
-
-
Extracting an ffmetadata file with ffmpeg goes as follows:
-
-
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
-
-
-
Reinserting edited metadata information from the FFMETADATAFILE file can
-be done as:
-
-
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
-
-
-
-
18 Protocols# TOC
-
-
Protocols are configured elements in FFmpeg that enable access to
-resources that require specific protocols.
-
-
When you configure your FFmpeg build, all the supported protocols are
-enabled by default. You can list all available ones using the
-configure option "–list-protocols".
-
-
You can disable all the protocols using the configure option
-"–disable-protocols", and selectively enable a protocol using the
-option "–enable-protocol=PROTOCOL ", or you can disable a
-particular protocol using the option
-"–disable-protocol=PROTOCOL ".
-
-
The option "-protocols" of the ff* tools will display the list of
-supported protocols.
-
-
A description of the currently available protocols follows.
-
-
-
18.1 bluray# TOC
-
-
Read BluRay playlist.
-
-
The accepted options are:
-
-angle
-BluRay angle
-
-
-chapter
-Start chapter (1...N)
-
-
-playlist
-Playlist to read (BDMV/PLAYLIST/?????.mpls)
-
-
-
-
-
Examples:
-
-
Read longest playlist from BluRay mounted to /mnt/bluray:
-
-ffplay bluray:/mnt/bluray
-
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
-
-
-ffplay -playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
-
-
-
-
18.2 cache# TOC
-
-
Caching wrapper for input stream.
-
-
Cache the input stream to temporary file. It brings seeking capability to live streams.
-
-
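-
For example, to make a live HTTP stream seekable in ffplay by caching it to
-a temporary file (the URL is a placeholder):
-
-
ffplay cache:http://example.com/live/stream.ts
-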
-
-
-
18.3 concat# TOC
-
-
Physical concatenation protocol.
-
-
Allows reading and seeking from many resources in sequence as if they were
-a single resource.
-
-
A URL accepted by this protocol has the syntax:
-
-
concat:URL1 |URL2 |...|URLN
-
-
-
where URL1 , URL2 , ..., URLN are the urls of the
-resource to be concatenated, each one possibly specifying a distinct
-protocol.
-
-
For example to read a sequence of files split1.mpeg ,
-split2.mpeg , split3.mpeg with ffplay
use the
-command:
-
-
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-
-
-
Note that you may need to escape the character "|" which is special for
-many shells.
-
-
-
18.4 crypto# TOC
-
-
AES-encrypted stream reading protocol.
-
-
The accepted options are:
-
-key
-Set the AES decryption key binary block from given hexadecimal representation.
-
-
-iv
-Set the AES decryption initialization vector binary block from given hexadecimal representation.
-
-
-
-
Accepted URL formats:
-
-
crypto:URL
-crypto+URL
-
-
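-
A usage sketch (the key, initialization vector and file name are
-placeholders; depending on the build, the key and iv options may
-instead need to be set programmatically via AVOptions):
-
-
ffplay -key 00112233445566778899aabbccddeeff -iv 00112233445566778899aabbccddeeff crypto:input.enc
-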
-
-
18.5 data# TOC
-
-
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
-
-
For example, to convert a GIF file given inline with ffmpeg
:
-
-
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-
-
-
-
18.6 file# TOC
-
-
File access protocol.
-
-
Allows reading from or writing to a file.
-
-
A file URL can have the form:
-
-file:filename
-
where filename is the path of the file to read.
-
-
A URL that does not have a protocol prefix will be assumed to be a
-file URL. Depending on the build, a URL that looks like a Windows
-path with the drive letter at the beginning will also be assumed to be
-a file URL (usually not the case in builds for unix-like systems).
-
-
For example to read from a file input.mpeg with ffmpeg
-use the command:
-
-
ffmpeg -i file:input.mpeg output.mpeg
-
-
-
This protocol accepts the following options:
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow medium.
-
-
-
-
-
18.7 ftp# TOC
-
-
FTP (File Transfer Protocol).
-
-
Allows reading from or writing to remote resources using the FTP protocol.
-
-
The following syntax is required.
-
-
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail address
-should be used.
-
-
-ftp-write-seekable
-Control seekability of connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-
-
-
-
NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations. ff* tools may produce incomplete content due to server limitations.
-
-
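-
For example, to copy a remote file over FTP without re-encoding (the
-credentials, host and paths are placeholders):
-
-
ffmpeg -i ftp://user:password@ftp.example.com/path/input.mpeg -c copy output.mkv
-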
-
18.8 gopher# TOC
-
-
Gopher protocol.
-
-
-
18.9 hls# TOC
-
-
Read Apple HTTP Live Streaming compliant segmented stream as
-a uniform one. The M3U8 playlists describing the segments can be
-remote HTTP resources or local files, accessed using the standard
-file protocol.
-The nested protocol is declared by specifying
-"+proto " after the hls URI scheme name, where proto
-is either "file" or "http".
-
-
-
hls+http://host/path/to/remote/resource.m3u8
-hls+file://path/to/local/resource.m3u8
-
-
-
Using this protocol is discouraged - the hls demuxer should work
-just as well (if not, please report the issues) and is more complete.
-To use the hls demuxer instead, simply use the direct URLs to the
-m3u8 files.
-
-
-
18.10 http# TOC
-
-
HTTP (Hyper Text Transfer Protocol).
-
-
This protocol accepts the following options:
-
-
-seekable
-Control seekability of connection. If set to 1 the resource is
-supposed to be seekable, if set to 0 it is assumed not to be seekable,
-if set to -1 it will try to autodetect if it is seekable. Default
-value is -1.
-
-
-chunked_post
-If set to 1 use chunked Transfer-Encoding for posts, default is 1.
-
-
-content_type
-Set a specific content type for the POST messages.
-
-
-headers
-Set custom HTTP headers, can override built in default headers. The
-value must be a string encoding the headers.
-
-
-multiple_requests
-Use persistent connections if set to 1, default is 0.
-
-
-post_data
-Set custom HTTP post data.
-
-
-user-agent
-user_agent
-Override the User-Agent header. If not specified the protocol will use a
-string describing the libavformat build. ("Lavf/<version>")
-
-
-timeout
-Set timeout in microseconds of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-
-mime_type
-Export the MIME type.
-
-
-icy
-If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
-supports this, the metadata has to be retrieved by the application by reading
-the icy_metadata_headers and icy_metadata_packet options.
-The default is 1.
-
-
-icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY-specific HTTP reply
-headers, separated by newline characters.
-
-
-icy_metadata_packet
-If the server supports ICY metadata, and icy was set to 1, this
-contains the last non-empty metadata packet sent by the server. It should be
-polled in regular intervals by applications interested in mid-stream metadata
-updates.
-
-
-cookies
-Set the cookies to be sent in future requests. The format of each cookie is the
-same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
-delimited by a newline character.
-
-
-offset
-Set initial byte offset.
-
-
-end_offset
-Try to limit the request to bytes preceding this offset.
-
-
-
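-
For example, to read an HTTP resource while overriding the User-Agent and
-lowering the socket timeout to 5 seconds (the URL and values are
-placeholders):
-
-
ffmpeg -user_agent "FooPlayer/1.0" -timeout 5000000 -i http://example.com/stream.ts -c copy output.mkv
-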
-
-
18.10.1 HTTP Cookies# TOC
-
-
Some HTTP requests will be denied unless cookie values are passed in with the
-request. The cookies option allows these cookies to be specified. At
-the very least, each cookie must specify a value along with a path and domain.
-HTTP requests that match both the domain and path will automatically include the
-cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
-by a newline.
-
-
The required syntax to play a stream specifying a cookie is:
-
-
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
-
-
-
-
18.11 Icecast# TOC
-
-
Icecast protocol (stream to Icecast servers)
-
-
This protocol accepts the following options:
-
-
-ice_genre
-Set the stream genre.
-
-
-ice_name
-Set the stream name.
-
-
-ice_description
-Set the stream description.
-
-
-ice_url
-Set the stream website URL.
-
-
-ice_public
-Set if the stream should be public.
-The default is 0 (not public).
-
-
-user_agent
-Override the User-Agent header. If not specified a string of the form
-"Lavf/<version>" will be used.
-
-
-password
-Set the Icecast mountpoint password.
-
-
-content_type
-Set the stream content type. This must be set if it is different from
-audio/mpeg.
-
-
-legacy_icecast
-This enables support for Icecast versions < 2.4.0, which do not support the
-HTTP PUT method but only the SOURCE method.
-
-
-
-
-
-
icecast://[username [:password ]@]server :port /mountpoint
-
-
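-
For example, to send an MP3 file to an Icecast mountpoint in real time (the
-server, port, password and mountpoint are placeholders):
-
-
ffmpeg -re -i input.mp3 -c:a copy -f mp3 icecast://source:hackme@icecast.example.com:8000/live.mp3
-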
-
-
18.12 mmst# TOC
-
-
MMS (Microsoft Media Server) protocol over TCP.
-
-
-
18.13 mmsh# TOC
-
-
MMS (Microsoft Media Server) protocol over HTTP.
-
-
The required syntax is:
-
-
mmsh://server [:port ][/app ][/playpath ]
-
-
-
-
18.14 md5# TOC
-
-
MD5 output protocol.
-
-
Computes the MD5 hash of the data to be written, and on close writes
-this to the designated output or stdout if none is specified. It can
-be used to test muxers without writing an actual file.
-
-
Some examples follow.
-
-
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
-ffmpeg -i input.flv -f avi -y md5:output.avi.md5
-
-# Write the MD5 hash of the encoded AVI file to stdout.
-ffmpeg -i input.flv -f avi -y md5:
-
-
-
Note that some formats (typically MOV) require the output protocol to
-be seekable, so they will fail with the MD5 output protocol.
-
-
-
18.15 pipe# TOC
-
-
UNIX pipe access protocol.
-
-
Allows reading from and writing to UNIX pipes.
-
-
The accepted syntax is:
-
-pipe:[number]
-
number is the number corresponding to the file descriptor of the
-pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
-is not specified, by default the stdout file descriptor will be used
-for writing, stdin for reading.
-
-
For example to read from stdin with ffmpeg
:
-
-
cat test.wav | ffmpeg -i pipe:0
-# ...this is the same as...
-cat test.wav | ffmpeg -i pipe:
-
-
-
For writing to stdout with ffmpeg
:
-
-
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
-# ...this is the same as...
-ffmpeg -i test.wav -f avi pipe: | cat > test.avi
-
-
-
This protocol accepts the following options:
-
-
-blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-INT_MAX
, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable if data transmission is slow.
-
-
-
-
Note that some formats (typically MOV), require the output protocol to
-be seekable, so they will fail with the pipe output protocol.
-
-
-
18.16 rtmp# TOC
-
-
Real-Time Messaging Protocol.
-
-
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
-content across a TCP/IP network.
-
-
The required syntax is:
-
-
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
-
-
-
The accepted parameters are:
-
-username
-An optional username (mostly for publishing).
-
-
-password
-An optional password (mostly for publishing).
-
-
-server
-The address of the RTMP server.
-
-
-port
-The number of the TCP port to use (by default is 1935).
-
-
-app
-It is the name of the application to access. It usually corresponds to
-the path where the application is installed on the RTMP server
-(e.g. /ondemand/ , /flash/live/ , etc.). You can override
-the value parsed from the URI through the rtmp_app
option, too.
-
-
-playpath
-It is the path or name of the resource to play with reference to the
-application specified in app , may be prefixed by "mp4:". You
-can override the value parsed from the URI through the rtmp_playpath
-option, too.
-
-
-listen
-Act as a server, listening for an incoming connection.
-
-
-timeout
-Maximum time to wait for the incoming connection. Implies listen.
-
-
-
-
Additionally, the following parameters can be set via command line options
-(or in code via AVOption
s):
-
-rtmp_app
-Name of application to connect on the RTMP server. This option
-overrides the parameter specified in the URI.
-
-
-rtmp_buffer
-Set the client buffer time in milliseconds. The default is 3000.
-
-
-rtmp_conn
-Extra arbitrary AMF connection parameters, parsed from a string,
-e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
-Each value is prefixed by a single character denoting the type,
-B for Boolean, N for number, S for string, O for object, or Z for null,
-followed by a colon. For Booleans the data must be either 0 or 1 for
-FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
-1 to end or begin an object, respectively. Data items in subobjects may
-be named, by prefixing the type with ’N’ and specifying the name before
-the value (i.e. NB:myFlag:1
). This option may be used multiple
-times to construct arbitrary AMF sequences.
-
-
-rtmp_flashver
-Version of the Flash plugin used to run the SWF player. The default
-is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
-<libavformat version>).)
-
-
-rtmp_flush_interval
-Number of packets flushed in the same request (RTMPT only). The default
-is 10.
-
-
-rtmp_live
-Specify that the media is a live stream. No resuming or seeking in
-live streams is possible. The default value is any
, which means the
-subscriber first tries to play the live stream specified in the
-playpath. If a live stream of that name is not found, it plays the
-recorded stream. The other possible values are live
and
-recorded
.
-
-
-rtmp_pageurl
-URL of the web page in which the media was embedded. By default no
-value will be sent.
-
-
-rtmp_playpath
-Stream identifier to play or to publish. This option overrides the
-parameter specified in the URI.
-
-
-rtmp_subscribe
-Name of live stream to subscribe to. By default no value will be sent.
-It is only sent if the option is specified or if rtmp_live
-is set to live.
-
-
-rtmp_swfhash
-SHA256 hash of the decompressed SWF file (32 bytes).
-
-
-rtmp_swfsize
-Size of the decompressed SWF file, required for SWFVerification.
-
-
-rtmp_swfurl
-URL of the SWF player for the media. By default no value will be sent.
-
-
-rtmp_swfverify
-URL to player swf file, compute hash/size automatically.
-
-
-rtmp_tcurl
-URL of the target stream. Defaults to proto://host[:port]/app.
-
-
-
-
-
For example to read with ffplay
a multimedia resource named
-"sample" from the application "vod" from an RTMP server "myserver":
-
-
ffplay rtmp://myserver/vod/sample
-
-
-
To publish to a password protected server, passing the playpath and
-app names separately:
-
-
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
-
-
-
-
18.17 rtmpe# TOC
-
-
Encrypted Real-Time Messaging Protocol.
-
-
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
-streaming multimedia content within standard cryptographic primitives,
-consisting of Diffie-Hellman key exchange and HMACSHA256, generating
-a pair of RC4 keys.
-
-
-
18.18 rtmps# TOC
-
-
Real-Time Messaging Protocol over a secure SSL connection.
-
-
The Real-Time Messaging Protocol (RTMPS) is used for streaming
-multimedia content across an encrypted connection.
-
-
-
18.19 rtmpt# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
-for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
18.20 rtmpte# TOC
-
-
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
-
-
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
-is used for streaming multimedia content within HTTP requests to traverse
-firewalls.
-
-
-
18.21 rtmpts# TOC
-
-
Real-Time Messaging Protocol tunneled through HTTPS.
-
-
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
-for streaming multimedia content within HTTPS requests to traverse
-firewalls.
-
-
-
18.22 libsmbclient# TOC
-
-
libsmbclient permits one to manipulate CIFS/SMB network resources.
-
-
The following syntax is required.
-
-
-
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout in milliseconds of socket I/O operations used by the underlying
-low level operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-workgroup
-Set the workgroup used for making connections. By default workgroup is not specified.
-
-
-
-
-
For more information see: http://www.samba.org/ .
-
-
-
18.23 libssh# TOC
-
-
Secure File Transfer Protocol via libssh
-
-
Allows reading from or writing to remote resources using the SFTP protocol.
-
-
The following syntax is required.
-
-
-
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
-
-
-
This protocol accepts the following options.
-
-
-timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout
-is not specified.
-
-
-truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-
-private_key
-Specify the path of the file containing the private key to use during authorization.
-By default libssh searches for keys in the ~/.ssh/ directory.
-
-
-
-
-
Example: Play a file stored on remote server.
-
-
-
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
-
-
-
-
18.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
-
-
Real-Time Messaging Protocol and its variants supported through
-librtmp.
-
-
Requires the presence of the librtmp headers and library during
-configuration. You need to explicitly configure the build with
-"–enable-librtmp". If enabled this will replace the native RTMP
-protocol.
-
-
This protocol provides most client functions and a few server
-functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
-encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
-variants of these encrypted types (RTMPTE, RTMPTS).
-
-
The required syntax is:
-
-
rtmp_proto ://server [:port ][/app ][/playpath ] options
-
-
-
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
-"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
-server , port , app and playpath have the same
-meaning as specified for the RTMP native protocol.
-options contains a list of space-separated options of the form
-key =val .
-
-
See the librtmp manual page (man 3 librtmp) for more information.
-
-
For example, to stream a file in real-time to an RTMP server using
-ffmpeg
:
-
-
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
-
-
-
To play the same stream using ffplay
:
-
-
ffplay "rtmp://myserver/live/mystream live=1"
-
-
-
-
18.25 rtp# TOC
-
-
Real-time Transport Protocol.
-
-
The required syntax for an RTP URL is:
-rtp://hostname [:port ][?option =val ...]
-
-
port specifies the RTP port to use.
-
-
The following URL options are supported:
-
-
-ttl=n
-Set the TTL (Time-To-Live) value (for multicast only).
-
-
-rtcpport=n
-Set the remote RTCP port to n .
-
-
-localrtpport=n
-Set the local RTP port to n .
-
-
-localrtcpport=n '
-Set the local RTCP port to n .
-
-
-pkt_size=n
-Set max packet size (in bytes) to n .
-
-
-connect=0|1
-Do a connect()
on the UDP socket (if set to 1) or not (if set
-to 0).
-
-
-sources=ip [,ip ]
-List allowed source IP addresses.
-
-
-block=ip [,ip ]
-List disallowed (blocked) source IP addresses.
-
-
-write_to_source=0|1
-Send packets to the source address of the latest received packet (if
-set to 1) or to a default remote address (if set to 0).
-
-
-localport=n
-Set the local RTP port to n .
-
-This is a deprecated option. Instead, localrtpport should be
-used.
-
-
-
-
-
Important notes:
-
-
- If rtcpport is not set the RTCP port will be set to the RTP
-port value plus 1.
-
- If localrtpport (the local RTP port) is not set any available
-port will be used for the local RTP and RTCP ports.
-
- If localrtcpport (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
-
-
-
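-
For example, to send a single video stream to a remote RTP endpoint (the
-address and port are placeholders; depending on the input codec,
-re-encoding or a bitstream filter may be required instead of copying):
-
-
ffmpeg -re -i input.mp4 -an -c:v copy -f rtp rtp://192.0.2.10:5004?pkt_size=1200
-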
-
18.26 rtsp# TOC
-
-
Real-Time Streaming Protocol.
-
-
RTSP is not technically a protocol handler in libavformat; it is a demuxer
-and muxer. The demuxer supports both normal RTSP (with data transferred
-over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
-data transferred over RDT).
-
-
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
-supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
-RTSP server ).
-
-
The required syntax for a RTSP url is:
-
-
rtsp://hostname [:port ]/path
-
-
-
Options can be set on the ffmpeg
/ffplay
command
-line, or set in code via AVOption
s or in
-avformat_open_input
.
-
-
The following options are supported.
-
-
-initial_pause
-Do not start playing the stream immediately if set to 1. Default value
-is 0.
-
-
-rtsp_transport
-Set RTSP transport protocols.
-
-It accepts the following values:
-
-‘udp ’
-Use UDP as lower transport protocol.
-
-
-‘tcp ’
-Use TCP (interleaving within the RTSP control channel) as lower
-transport protocol.
-
-
-‘udp_multicast ’
-Use UDP multicast as lower transport protocol.
-
-
-‘http ’
-Use HTTP tunneling as lower transport protocol, which is useful for
-passing proxies.
-
-
-
-Multiple lower transport protocols may be specified, in that case they are
-tried one at a time (if the setup of one fails, the next one is tried).
-For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
-
-
-rtsp_flags
-Set RTSP flags.
-
-The following values are accepted:
-
-‘filter_src ’
-Accept packets only from negotiated peer address and port.
-
-‘listen ’
-Act as a server, listening for an incoming connection.
-
-‘prefer_tcp ’
-Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
-
-
-
-Default value is ‘none ’.
-
-
-allowed_media_types
-Set media types to accept from the server.
-
-The following flags are accepted:
-
-‘video ’
-‘audio ’
-‘data ’
-
-
-By default it accepts all media types.
-
-
-min_port
-Set minimum local UDP port. Default value is 5000.
-
-
-max_port
-Set maximum local UDP port. Default value is 65000.
-
-
-timeout
-Set maximum timeout (in seconds) to wait for incoming connections.
-
-A value of -1 means infinite (default). This option implies the
-rtsp_flags set to ‘listen ’.
-
-
-reorder_queue_size
-Set number of packets to buffer for handling of reordered packets.
-
-
-stimeout
-Set socket TCP I/O timeout in microseconds.
-
-
-user-agent
-Override User-Agent header. If not specified, it defaults to the
-libavformat identifier string.
-
-
-
-
When receiving data over UDP, the demuxer tries to reorder received packets
-(since they may arrive out of order, or packets may get lost totally). This
-can be disabled by setting the maximum demuxing delay to zero (via
-the max_delay
field of AVFormatContext).
-
-
When watching multi-bitrate Real-RTSP streams with ffplay
, the
-streams to display can be chosen with -vst
n and
--ast
n for video and audio respectively, and can be switched
-on the fly by pressing v
and a
.
-
-
-
18.26.1 Examples# TOC
-
-
The following examples all make use of the ffplay
and
-ffmpeg
tools.
-
-
- Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-
-
- Watch a stream tunneled over HTTP:
-
-
ffplay -rtsp_transport http rtsp://server/video.mp4
-
-
- Send a stream in realtime to a RTSP server, for others to watch:
-
-
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-
-
- Receive a stream in realtime:
-
-
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
-
-
-
-
-
18.27 sap# TOC
-
-
Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat; it is a muxer and demuxer.
-It is used for signalling of RTP streams, by announcing the SDP for the
-streams regularly on a separate port.
-
-
-
18.27.1 Muxer# TOC
-
-
The syntax for a SAP url given to the muxer is:
-
-
sap://destination [:port ][?options ]
-
-
-
The RTP packets are sent to destination on port port ,
-or to port 5004 if no port is specified.
-options is a &
-separated list. The following options
-are supported:
-
-
-announce_addr=address
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if destination is an IPv6 address.
-
-
-announce_port=port
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-
-ttl=ttl
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-
-same_port=0|1
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous.
-VLC/Live555 requires this to be set to 1, to be able to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-
-
-
-
Example command lines follow.
-
-
To broadcast a stream on the local subnet, for watching in VLC:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
-
-
-
Similarly, for watching in ffplay
:
-
-
-
ffmpeg -re -i input -f sap sap://224.0.0.255
-
-
-
And for watching in ffplay
, over IPv6:
-
-
-
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
-
-
-
-
18.27.2 Demuxer# TOC
-
-
The syntax for a SAP url given to the demuxer is:
-
-
sap://[address ][:port ]
-
-
-
address is the multicast address to listen for announcements on;
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
-is the port that is listened on, 9875 if omitted.
-
-
The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-
Example command lines follow.
-
-
To play back the first stream announced on the normal SAP multicast address:
-
-
-ffplay sap://
-
To play back the first stream announced on the default IPv6 SAP multicast address:
-
-
-
ffplay sap://[ff0e::2:7ffe]
-
-
-
-
18.28 sctp# TOC
-
-
Stream Control Transmission Protocol.
-
-
The accepted URL syntax is:
-
-
sctp://host :port [?options ]
-
-
-
The protocol accepts the following options:
-
-listen
-If set to any value, listen for an incoming connection. Outgoing connection is done by default.
-
-
-max_streams
-Set the maximum number of streams. By default no limit is set.
-
-
-
-
-
18.29 srtp# TOC
-
-
Secure Real-time Transport Protocol.
-
-
The accepted options are:
-
-srtp_in_suite
-srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-
-‘AES_CM_128_HMAC_SHA1_80 ’
-‘SRTP_AES128_CM_HMAC_SHA1_80 ’
-‘AES_CM_128_HMAC_SHA1_32 ’
-‘SRTP_AES128_CM_HMAC_SHA1_32 ’
-
-
-
-srtp_in_params
-srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-
-
-
-
-
18.30 subfile# TOC
-
-
Virtually extract a segment of a file or another stream.
-The underlying stream must be seekable.
-
-
Accepted options:
-
-start
-Start offset of the extracted segment, in bytes.
-
-end
-End offset of the extracted segment, in bytes.
-
-
-
-
Examples:
-
-
Extract a chapter from a DVD VOB file (start and end sectors obtained
-externally and multiplied by 2048):
-
-
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
-
-
-
Play an AVI file directly from a TAR archive:
-subfile,,start,183241728,end,366490624,,:archive.tar
-
-
-
18.31 tcp# TOC
-
-
Transmission Control Protocol.
-
-
The required syntax for a TCP url is:
-
-
tcp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form
-key =val .
-
-
The list of supported options follows.
-
-
-listen=1|0
-Listen for an incoming connection. Default value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-listen_timeout=microseconds
-Set listen timeout, expressed in microseconds.
-
-
-
-
The following example shows how to setup a listening TCP connection
-with ffmpeg
, which is then accessed with ffplay
:
-
-
ffmpeg -i input -f format tcp://hostname :port ?listen
-ffplay tcp://hostname :port
-
-
-
-
18.32 tls# TOC
-
-
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-
The required syntax for a TLS/SSL url is:
-
-
tls://hostname :port [?options ]
-
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-ca_file, cafile=filename
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-
-tls_verify=1|0
-If enabled, try to verify the peer that we are communicating with.
-Note, if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-
-cert_file, cert=filename
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates only are mandated in certain
-setups.)
-
-
-key_file, key=filename
-A file containing the private key for the certificate.
-
-
-listen=1|0
-If enabled, listen for connections on the provided port, and assume
-the server role in the handshake instead of the client role.
-
-
-
-
-
Example command lines:
-
-
To create a TLS/SSL server that serves an input stream.
-
-
-
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
-
-
-
To play back a stream from the TLS/SSL server using ffplay
:
-
-
-
ffplay tls://hostname :port
-
-
-
-
18.33 udp# TOC
-
-
User Datagram Protocol.
-
-
The required syntax for an UDP URL is:
-
-
udp://hostname :port [?options ]
-
-
-
options contains a list of &-separated options of the form key =val .
-
-
In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows one to reduce loss of data due to
-UDP socket buffer overruns. The fifo_size and
-overrun_nonfatal options are related to this buffer.
-
-
The list of supported options follows.
-
-
-buffer_size=size
-Set the UDP maximum socket buffer size in bytes. This is used to set either
-the receive or send buffer size, depending on what the socket is used for.
-Default is 64KB. See also fifo_size .
-
-
-localport=port
-Override the local UDP port to bind with.
-
-
-localaddr=addr
-Choose the local IP address. This is useful e.g. if sending multicast
-and the host has multiple interfaces, where the user can choose
-which interface to send on by specifying the IP address of that interface.
-
-
-pkt_size=size
-Set the size in bytes of UDP packets.
-
-
-reuse=1|0
-Explicitly allow or disallow reusing UDP sockets.
-
-
-ttl=ttl
-Set the time to live value (for multicast only).
-
-
-connect=1|0
-Initialize the UDP socket with connect()
. In this case, the
-destination address can’t be changed with ff_udp_set_remote_url later.
-If the destination address isn’t known at the start, this option can
-be specified in ff_udp_set_remote_url, too.
-This allows finding out the source address for the packets with getsockname,
-and makes writes return with AVERROR(ECONNREFUSED) if "destination
-unreachable" is received.
-For receiving, this gives the benefit of only receiving packets from
-the specified peer address/port.
-
-
-sources=address [,address ]
-Only receive packets sent to the multicast group from one of the
-specified sender IP addresses.
-
-
-block=address [,address ]
-Ignore packets sent to the multicast group from the specified
-sender IP addresses.
-
-
-fifo_size=units
-Set the UDP receiving circular buffer size, expressed as a number of
-packets with size of 188 bytes. If not specified defaults to 7*4096.
-
-
-overrun_nonfatal=1|0
-Survive in case of UDP receiving circular buffer overrun. Default
-value is 0.
-
-
-timeout=microseconds
-Set raise error timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrived in more
-than this time interval, raise error.
-
-
-broadcast=1|0
-Explicitly allow or disallow UDP broadcasting.
-
-Note that broadcasting may not work properly on networks having
-a broadcast storm protection.
-
-
-
-
-
18.33.1 Examples# TOC
-
-
- Use ffmpeg
to stream over UDP to a remote endpoint:
-
-
ffmpeg -i input -f format udp://hostname :port
-
-
- Use ffmpeg
to stream in mpegts format over UDP using 188
-sized UDP packets, using a large input buffer:
-
-
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
-
-
- Use ffmpeg
to receive over UDP from a remote endpoint:
-
-
ffmpeg -i udp://[multicast-address ]:port ...
-
-
-
-
-
18.34 unix# TOC
-
-
Unix local socket
-
-
The required syntax for a Unix socket URL is:
-
-unix://filepath
-
-
The following parameters can be set via command line options
-(or in code via AVOption
s):
-
-
-timeout
-Timeout in ms.
-
-listen
-Create the Unix socket in listening mode.
-
-
-
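-
-
Example (a sketch; the socket path /tmp/ffmpeg.sock is arbitrary and the
-listen option is passed as a command line option, as described above).
-Serve an MPEG-TS stream on a local socket and read it back with a second
-ffmpeg instance:
-
# Writer: create the socket in listening mode and feed it
ffmpeg -i input -f mpegts -listen 1 unix:///tmp/ffmpeg.sock
# Reader: connect to the socket and copy the stream to a file
ffmpeg -f mpegts -i unix:///tmp/ffmpeg.sock -c copy output.ts
-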
-
-
19 Device Options# TOC
-
-
The libavdevice library provides the same interface as
-libavformat. Namely, an input device is considered like a demuxer, and
-an output device like a muxer, and the interface and generic device
-options are the same provided by libavformat (see the ffmpeg-formats
-manual).
-
-
In addition each input or output device may support so-called private
-options, which are specific for that component.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, or by setting the value explicitly in the device
-AVFormatContext
options or using the libavutil/opt.h API
-for programmatic use.
-
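-
-
For example (a minimal sketch; the ALSA card hw:0 and the option values are
-assumptions), private options of the alsa input device can be set on the
-command line before the corresponding -i specification:
-
ffmpeg -f alsa -sample_rate 48000 -channels 2 -i hw:0 out.wav
-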
-
-
-
20 Input Devices# TOC
-
-
Input devices are configured elements in FFmpeg which allow you to access
-the data coming from a multimedia device attached to your system.
-
-
When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-
You can disable all the input devices using the configure option
-"--disable-indevs", and selectively enable an input device using the
-option "--enable-indev=INDEV ", or you can disable a particular
-input device using the option "--disable-indev=INDEV ".
-
-
The option "-devices" of the ff* tools will display the list of
-supported input devices.
-
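-
For example, to print that list:
-
ffmpeg -devices
-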
-
A description of the currently available input devices follows.
-
-
-
20.1 alsa# TOC
-
-
ALSA (Advanced Linux Sound Architecture) input device.
-
-
To enable this input device during configuration you need libasound
-installed on your system.
-
-
This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-
An ALSA identifier has the syntax:
-
-
hw:CARD [,DEV [,SUBDEV ]]
-
-
-
where the DEV and SUBDEV components are optional.
-
-
The three arguments (in order: CARD ,DEV ,SUBDEV )
-specify card number or identifier, device number and subdevice number
-(-1 means any).
-
-
To see the list of cards currently recognized by your system check the
-files /proc/asound/cards and /proc/asound/devices .
-
-
For example to capture with ffmpeg
from an ALSA device with
-card id 0, you may run the command:
-
-
ffmpeg -f alsa -i hw:0 alsaout.wav
-
-
-
For more information see:
-http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
-
-
-
20.2 avfoundation# TOC
-
-
AVFoundation input device.
-
-
AVFoundation is the framework currently recommended by Apple for stream grabbing on OS X >= 10.7 as well as on iOS.
-The older QTKit framework has been marked deprecated since OS X version 10.7.
-
-
The input filename has to be given in the following syntax:
-
-
-i "[[VIDEO]:[AUDIO]]"
-
-
The first entry selects the video input while the latter selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-
- -video_device_index <INDEX>
-
-and/or
-
- -audio_device_index <INDEX>
-
-, overriding any
-device name or index given in the input filename.
-
-
All available devices can be enumerated by using -list_devices true , listing
-all device names and corresponding indices.
-
-
There are two device name aliases:
-
-default
-Select the AVFoundation default device of the corresponding type.
-
-
-none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-
-
-
-
-
20.2.1 Options# TOC
-
-
AVFoundation supports the following options:
-
-
--list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-
--video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-
--audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-
--pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-and the first one in this list is used instead. Available pixel formats are:
-monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
- bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
- yuv420p, nv12, yuyv422, gray
-
-
-
-
-
-
20.2.2 Examples# TOC
-
-
- Print the list of AVFoundation supported devices and exit:
-
-
$ ffmpeg -f avfoundation -list_devices true -i ""
-
-
- Record video from video device 0 and audio from audio device 0 into out.avi:
-
-
$ ffmpeg -f avfoundation -i "0:0" out.avi
-
-
- Record video from video device 2 and audio from audio device 1 into out.avi:
-
-
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
-
-
- Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
-
-
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
-
-
-
-
-
-
20.3 bktr# TOC
-
-
BSD video input device.
-
-
-
20.4 dshow# TOC
-
-
Windows DirectShow input device.
-
-
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
-Currently only audio and video devices are supported.
-
-
Multiple devices may be opened as separate inputs, but they may also be
-opened on the same input, which should improve synchronism between them.
-
-
The input name should be in the format:
-
TYPE=NAME[:TYPE=NAME]
-
-
where TYPE can be either audio or video ,
-and NAME is the device’s name.
-
-
-
20.4.1 Options# TOC
-
-
If no options are specified, the device’s defaults are used.
-If the device does not support the requested options, it will
-fail to open.
-
-
-video_size
-Set the video size in the captured video.
-
-
-framerate
-Set the frame rate in the captured video.
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-
-
-sample_size
-Set the sample size (in bits) of the captured audio.
-
-
-channels
-Set the number of channels in the captured audio.
-
-
-list_devices
-If set to true , print a list of devices and exit.
-
-
-list_options
-If set to true , print a list of selected device’s options
-and exit.
-
-
-video_device_number
-Set video device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-audio_device_number
-Set audio device number for devices with same name (starts at 0,
-defaults to 0).
-
-
-pixel_format
-Select pixel format to be used by DirectShow. This may only be set when
-the video codec is not set or set to rawvideo.
-
-
-audio_buffer_size
-Set audio device buffer size in milliseconds (which can directly
-impact latency, depending on the device).
-Defaults to using the audio device’s
-default buffer size (typically some multiple of 500ms).
-Setting this value too low can degrade performance.
-See also
-http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
-
-
-
-
-
-
20.4.2 Examples# TOC
-
-
- Print the list of DirectShow supported devices and exit:
-
-
$ ffmpeg -list_devices true -f dshow -i dummy
-
-
- Open video device Camera :
-
-
$ ffmpeg -f dshow -i video="Camera"
-
-
- Open second video device with name Camera :
-
-
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
-
-
- Open video device Camera and audio device Microphone :
-
-
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
-
-
- Print the list of supported options in selected device and exit:
-
-
$ ffmpeg -list_options true -f dshow -i video="Camera"
-
-
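-
- As a further sketch (the device names Camera and Microphone are the
-assumed names from the examples above; the option values are arbitrary),
-several of the options described above can be combined:
-
$ ffmpeg -f dshow -video_size 640x480 -framerate 30 -sample_rate 44100 -audio_buffer_size 50 -i video="Camera":audio="Microphone" out.mkv
-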
-
-
-
-
20.5 dv1394# TOC
-
-
Linux DV 1394 input device.
-
-
-
20.6 fbdev# TOC
-
-
Linux framebuffer input device.
-
-
The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-/dev/fb0 .
-
-
For more detailed information read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-
To record from the framebuffer device /dev/fb0 with
-ffmpeg
:
-
-
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-
-
-
You can take a single screenshot image with the command:
-
-
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-
-
-
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
-
-
-
20.7 gdigrab# TOC
-
-
Win32 GDI-based screen capture device.
-
-
This device allows you to capture a region of the display on Windows.
-
-
There are two options for the input filename:
-
-
or
-
-
-
The first option will capture the entire desktop, or a fixed region of the
-desktop. The second option will instead capture the contents of a single
-window, regardless of its position on the screen.
-
-
For example, to grab the entire desktop using ffmpeg
:
-
-
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
-
-
-
Grab a 640x480 region at position 10,20
:
-
-
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
-
-
-
Grab the contents of the window named "Calculator"
-
-
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
-
-
-
-
20.7.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. Use the value 0
to
-not draw the pointer. Default value is 1
.
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-Note that show_region is incompatible with grabbing the contents
-of a single window.
-
-For example:
-
-
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
-
-
-
-video_size
-Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
-
-
-offset_x
-When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
-
-
-offset_y
-When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
-
-Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
-
-
-
-
-
-
20.8 iec61883# TOC
-
-
FireWire DV/HDV input device using libiec61883.
-
-
To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
---enable-libiec61883
to compile with the device enabled.
-
-
The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-
Specify the FireWire port to be used as input file, or "auto"
-to choose the first port connected.
-
-
-
20.8.1 Options# TOC
-
-
-dvtype
-Override autodetection of DV/HDV. This should only be used if auto
-detection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and result in undefined behavior.
-The values auto , dv and hdv are supported.
-
-
-dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
-is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-
-dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful to select the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-
-
-
-
-
20.8.2 Examples# TOC
-
-
- Grab and show the input of a FireWire DV/HDV device.
-
-
ffplay -f iec61883 -i auto
-
-
- Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-
-
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
-
-
-
-
-
-
20.9 jack# TOC
-
-
JACK input device.
-
-
To enable this input device during configuration you need libjack
-installed on your system.
-
-
A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name client_name :input_N , where
-client_name is the name provided by the application, and N
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-
Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-
To connect or disconnect JACK clients you can use the jack_connect
-and jack_disconnect
programs, or do it through a graphical interface,
-for example with qjackctl
.
-
-
To list the JACK clients and their properties you can invoke the command
-jack_lsp
.
-
-
The following example shows how to capture a JACK readable client
-with ffmpeg.
-
-
# Create a JACK writable client with name "ffmpeg".
-$ ffmpeg -f jack -i ffmpeg -y out.wav
-
-# Start the sample jack_metro readable client.
-$ jack_metro -b 120 -d 0.2 -f 4000
-
-# List the current JACK clients.
-$ jack_lsp -c
-system:capture_1
-system:capture_2
-system:playback_1
-system:playback_2
-ffmpeg:input_1
-metro:120_bpm
-
-# Connect metro to the ffmpeg writable client.
-$ jack_connect metro:120_bpm ffmpeg:input_1
-
-
-
For more information read:
-http://jackaudio.org/
-
-
-
20.10 lavfi# TOC
-
-
Libavfilter input virtual device.
-
-
This input device reads data from the open output pads of a libavfilter
-filtergraph.
-
-
For each filtergraph open output, the input device will create a
-corresponding stream which is mapped to the generated output. Currently
-only video data is supported. The filtergraph is specified through the
-option graph .
-
-
-
20.10.1 Options# TOC
-
-
-graph
-Specify the filtergraph to use as input. Each video open output must be
-labelled by a unique string of the form "outN ", where N is a
-number starting from 0 corresponding to the mapped input stream
-generated by the device.
-The first unlabelled output is automatically assigned to the "out0"
-label, but all the others need to be specified explicitly.
-
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
-If not specified defaults to the filename specified for the input
-device.
-
-
-graph_file
-Set the filename of the filtergraph to be read and sent to the other
-filters. Syntax of the filtergraph is the same as the one specified by
-the option graph .
-
-
-
-
-
-
20.10.2 Examples# TOC
-
-
- Create a color video stream and play it back with ffplay
:
-
-
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
-
-
- As the previous example, but use filename for specifying the graph
-description, and omit the "out0" label:
-
-
ffplay -f lavfi color=c=pink
-
-
- Create three different video test filtered sources and play them:
-
-
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
-
-
- Read an audio stream from a file using the amovie source and play it
-back with ffplay
:
-
-
ffplay -f lavfi "amovie=test.wav"
-
-
- Read an audio stream and a video stream and play it back with
-ffplay
:
-
-
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
-
-
- Dump decoded frames to images and closed captions to a file (experimental):
-
-
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-
-
-
-
-
-
20.11 libcdio# TOC
-
-
Audio-CD input device based on cdio.
-
-
To enable this input device during configuration you need libcdio
-installed on your system. Requires the configure option
---enable-libcdio
.
-
-
This device allows playing and grabbing from an Audio-CD.
-
-
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
-you may run the command:
-
-
ffmpeg -f libcdio -i /dev/sr0 cd.wav
-
-
-
-
20.12 libdc1394# TOC
-
-
IIDC1394 input device, based on libdc1394 and libraw1394.
-
-
Requires the configure option --enable-libdc1394
.
-
-
-
20.13 openal# TOC
-
-
The OpenAL input device provides audio capture on all systems with a
-working OpenAL 1.1 implementation.
-
-
To enable this input device during configuration, you need OpenAL
-headers and libraries installed on your system, and need to configure
-FFmpeg with --enable-openal
.
-
-
OpenAL headers and libraries should be provided as part of your OpenAL
-implementation, or as an additional download (an SDK). Depending on your
-installation you may need to specify additional flags via the
---extra-cflags
and --extra-ldflags
for allowing the build
-system to locate the OpenAL headers and libraries.
-
-
An incomplete list of OpenAL implementations follows:
-
-
-Creative
-The official Windows implementation, providing hardware acceleration
-with supported devices and software fallback.
-See http://openal.org/ .
-
-OpenAL Soft
-Portable, open source (LGPL) software implementation. Includes
-backends for the most common sound APIs on the Windows, Linux,
-Solaris, and BSD operating systems.
-See http://kcat.strangesoft.net/openal.html .
-
-Apple
-OpenAL is part of Core Audio, the official Mac OS X Audio interface.
-See http://developer.apple.com/technologies/mac/audio-and-video.html
-
-
-
-
This device allows one to capture from an audio input device handled
-through OpenAL.
-
-
You need to specify the name of the device to capture in the provided
-filename. If the empty string is provided, the device will
-automatically select the default device. You can get the list of the
-supported devices by using the option list_devices .
-
-
-
20.13.1 Options# TOC
-
-
-channels
-Set the number of channels in the captured audio. Only the values
-1 (monaural) and 2 (stereo) are currently supported.
-Defaults to 2 .
-
-
-sample_size
-Set the sample size (in bits) of the captured audio. Only the values
-8 and 16 are currently supported. Defaults to
-16 .
-
-
-sample_rate
-Set the sample rate (in Hz) of the captured audio.
-Defaults to 44.1k .
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-
-
-
-
20.13.2 Examples# TOC
-
-
Print the list of OpenAL supported devices and exit:
-
-
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
-
-
-
Capture from the OpenAL device DR-BT101 via PulseAudio :
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
-
-
-
Capture from the default device (note the empty string ” as filename):
-
-
$ ffmpeg -f openal -i '' out.ogg
-
-
-
Capture from two devices simultaneously, writing to two different files,
-within the same ffmpeg
command:
-
-
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
-
-
Note: not all OpenAL implementations support multiple simultaneous capture -
-try the latest OpenAL Soft if the above does not work.
-
-
-
20.14 oss# TOC
-
-
Open Sound System input device.
-
-
The filename to provide to the input device is the device node
-representing the OSS input device, and is usually set to
-/dev/dsp .
-
-
For example to grab from /dev/dsp using ffmpeg
use the
-command:
-
-
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
-
-
-
For more information about OSS see:
-http://manuals.opensound.com/usersguide/dsp.html
-
-
-
20.15 pulse# TOC
-
-
PulseAudio input device.
-
-
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
-
-
The filename to provide to the input device is a source device or the
-string "default"
-
-
To list the PulseAudio source devices and their properties you can invoke
-the command pactl list sources
.
-
-
More information about PulseAudio can be found on http://www.pulseaudio.org .
-
-
-
20.15.1 Options# TOC
-
-server
-Connect to a specific PulseAudio server, specified by an IP address.
-Default server is used when not provided.
-
-
-name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the LIBAVFORMAT_IDENT
string.
-
-
-stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is "record".
-
-
-sample_rate
-Specify the samplerate in Hz, by default 48kHz is used.
-
-
-channels
-Specify the channels in use, by default 2 (stereo) is set.
-
-
-frame_size
-Specify the number of bytes per frame, by default it is set to 1024.
-
-
-fragment_size
-Specify the minimal buffering fragment in PulseAudio, it will affect the
-audio latency. By default it is unset.
-
-
-
-
-
20.15.2 Examples# TOC
-
Record a stream from default device:
-
-
ffmpeg -f pulse -i default /tmp/pulse.wav
-
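-
-
As a further sketch (the option values are arbitrary), record a mono stream
-at 44.1 kHz from the default source using the options described above:
-
ffmpeg -f pulse -sample_rate 44100 -channels 1 -i default /tmp/pulse-mono.wav
-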
-
-
-
20.16 qtkit# TOC
-
-
QTKit input device.
-
-
The filename passed as input is parsed to contain either a device name or index.
-The device index can also be given by using -video_device_index.
-A given device index will override any given device name.
-If the desired device consists of numbers only, use -video_device_index to identify it.
-The default device will be chosen if an empty string or the device name "default" is given.
-The available devices can be enumerated by using -list_devices.
-
-
-
ffmpeg -f qtkit -i "0" out.mpg
-
-
-
-
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
-
-
-
-
ffmpeg -f qtkit -i "default" out.mpg
-
-
-
-
ffmpeg -f qtkit -list_devices true -i ""
-
-
-
-
20.17 sndio# TOC
-
-
sndio input device.
-
-
To enable this input device during configuration you need libsndio
-installed on your system.
-
-
The filename to provide to the input device is the device node
-representing the sndio input device, and is usually set to
-/dev/audio0 .
-
-
For example to grab from /dev/audio0 using ffmpeg
use the
-command:
-
-
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
-
-
-
-
20.18 video4linux2, v4l2# TOC
-
-
Video4Linux2 input video device.
-
-
"v4l2" can be used as alias for "video4linux2".
-
-
If FFmpeg is built with v4l-utils support (by using the
---enable-libv4l2
configure option), it is possible to use it with the
--use_libv4l2
input device option.
-
-
The name of the device to grab is a file device node. Linux
-systems usually create such nodes automatically when the device
-(e.g. a USB webcam) is plugged into the system, and the node has a name of the
-kind /dev/videoN , where N is a number associated with
-the device.
-
-
Video4Linux2 devices usually support a limited set of
-width xheight sizes and frame rates. You can check which are
-supported using -list_formats all
for Video4Linux2 devices.
-Some devices, like TV cards, support one or more standards. It is possible
-to list all the supported standards using -list_standards all
.
-
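-
For example, to list the supported formats and frame sizes of
-/dev/video0 , or its supported standards:
-
ffmpeg -f video4linux2 -list_formats all -i /dev/video0
ffmpeg -f video4linux2 -list_standards all -i /dev/video0
-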
-
The time base for the timestamps is 1 microsecond. Depending on the kernel
-version and configuration, the timestamps may be derived from the real time
-clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
-boot time, unaffected by NTP or manual changes to the clock). The
--timestamps abs or -ts abs option can be used to force
-conversion into the real time clock.
-
-
Some usage examples of the video4linux2 device with ffmpeg
-and ffplay
:
-
- Grab and show the input of a video4linux2 device:
-
-
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
-
-
- Grab and record the input of a video4linux2 device, leave the
-frame rate and size as previously set:
-
-
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
-
-
-
-
For more information about Video4Linux, check http://linuxtv.org/ .
-
-
-
20.18.1 Options# TOC
-
-
-standard
-Set the standard. Must be the name of a supported standard. To get a
-list of the supported standards, use the list_standards
-option.
-
-
-channel
-Set the input channel number. Defaults to -1, which means using the
-previously selected channel.
-
-
-video_size
-Set the video frame size. The argument must be a string in the form
-WIDTH xHEIGHT or a valid size abbreviation.
-
-
-pixel_format
-Select the pixel format (only valid for raw video input).
-
-
-input_format
-Set the preferred pixel format (for raw video) or a codec name.
-This option allows one to select the input format, when several are
-available.
-
-
-framerate
-Set the preferred video frame rate.
-
-
-list_formats
-List available formats (supported pixel formats, codecs, and frame
-sizes) and exit.
-
-Available values are:
-
-‘all ’
-Show all available (compressed and non-compressed) formats.
-
-
-‘raw ’
-Show only raw video (non-compressed) formats.
-
-
-‘compressed ’
-Show only compressed formats.
-
-
-
-
-list_standards
-List supported standards and exit.
-
-Available values are:
-
-‘all ’
-Show all supported standards.
-
-
-
-
-timestamps, ts
-Set type of timestamps for grabbed frames.
-
-Available values are:
-
-‘default ’
-Use timestamps from the kernel.
-
-
-‘abs ’
-Use absolute timestamps (wall clock).
-
-
-‘mono2abs ’
-Force conversion from monotonic to absolute timestamps.
-
-
-
-Default value is default
.
-
-
-
-
-
20.19 vfwcap# TOC
-
-
VfW (Video for Windows) capture input device.
-
-
The filename passed as input is the capture driver number, ranging from
-0 to 9. You may use "list" as filename to print a list of drivers. Any
-other filename will be interpreted as device number 0.
-
-
-
20.20 x11grab# TOC
-
-
X11 video input device.
-
-
Depends on X11, Xext, and Xfixes. Requires the configure option
---enable-x11grab
.
-
-
This device allows one to capture a region of an X11 display.
-
-
The filename passed as input has the syntax:
-
-
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
-
-
-
hostname :display_number .screen_number specifies the
-X11 display name of the screen to grab from. hostname can be
-omitted, and defaults to "localhost". The environment variable
-DISPLAY
contains the default display name.
-
-
x_offset and y_offset specify the offsets of the grabbed
-area with respect to the top-left border of the X11 screen. They
-default to 0.
-
-
Check the X11 documentation (e.g. man X) for more detailed information.
-
-
Use the xdpyinfo
program for getting basic information about the
-properties of your X11 display (e.g. grep for "name" or "dimensions").
-
-
For example to grab from :0.0 using ffmpeg
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
Grab at position 10,20
:
-
-
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-
-
20.20.1 Options# TOC
-
-
-draw_mouse
-Specify whether to draw the mouse pointer. A value of 0 specifies
-not to draw the pointer. Default value is 1.
-
-
-follow_mouse
-Make the grabbed area follow the mouse. The argument can be
-centered
or a number of pixels PIXELS .
-
-When it is specified with "centered", the grabbing region follows the mouse
-pointer and keeps the pointer at the center of the region; otherwise, the region
-follows only when the mouse pointer comes within PIXELS (greater than
-zero) of the edge of the region.
-
-For example:
-
-
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-To follow only when the mouse pointer comes within 100 pixels of the edge:
-
-
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-framerate
-Set the grabbing frame rate. Default value is ntsc
,
-corresponding to a frame rate of 30000/1001
.
-
-
-show_region
-Show grabbed region on screen.
-
-If show_region is specified with 1
, then the grabbing
-region will be indicated on screen. With this option, it is easy to
-know what is being grabbed if only a portion of the screen is grabbed.
-
-For example:
-
-
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
-
-
-With follow_mouse :
-
-
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
-
-
-
-video_size
-Set the video frame size. Default value is vga
.
-
-
-use_shm
-Use the MIT-SHM extension for shared memory. Default value is 1
.
-It may be necessary to disable it for remote displays.
-
-
-
-
-
20.21 decklink# TOC
-
-
The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-
To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate --extra-cflags
-and --extra-ldflags
.
-On Windows, you need to run the IDL files through widl
.
-
-
DeckLink is very picky about the formats it supports. Pixel format is always
-uyvy422, framerate and video size must be determined for your device with
--list_formats 1
. Audio sample rate is always 48 kHz and the number
-of channels currently is limited to 2 (stereo).
-
-
-
20.21.1 Options# TOC
-
-
-list_devices
-If set to true , print a list of devices and exit.
-Defaults to false .
-
-
-list_formats
-If set to true , print a list of supported formats and exit.
-Defaults to false .
-
-
-
-
-
-
20.21.2 Examples# TOC
-
-
- List input devices:
-
-
ffmpeg -f decklink -list_devices 1 -i dummy
-
-
- List supported formats:
-
-
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-
-
- Capture video clip at 1080i50 (format 11):
-
-
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
-
-
-
-
-
-
-
21 Resampler Options# TOC
-
-
The audio resampler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools, option =value for the aresample filter,
-by setting the value explicitly in the
-SwrContext
options or using the libavutil/opt.h API for
-programmatic use.
-
-
-ich, in_channel_count
-Set the number of input channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-in_channel_layout is set.
-
-
-och, out_channel_count
-Set the number of output channels. Default value is 0. Setting this
-value is not mandatory if the corresponding channel layout
-out_channel_layout is set.
-
-
-uch, used_channel_count
-Set the number of used input channels. Default value is 0. This option is
-only used for special remapping.
-
-
-isr, in_sample_rate
-Set the input sample rate. Default value is 0.
-
-
-osr, out_sample_rate
-Set the output sample rate. Default value is 0.
-
-
-isf, in_sample_fmt
-Specify the input sample format. It is set by default to none
.
-
-
-osf, out_sample_fmt
-Specify the output sample format. It is set by default to none
.
-
-
-tsf, internal_sample_fmt
-Set the internal sample format. Default value is none
.
-This will automatically be chosen when it is not explicitly set.
-
-
-icl, in_channel_layout
-ocl, out_channel_layout
-Set the input/output channel layout.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-clev, center_mix_level
-Set the center mix level. It is a value expressed in deciBel, and must be
-in the interval [-32,32].
-
-
-slev, surround_mix_level
-Set the surround mix level. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-lfe_mix_level
-Set LFE mix into non LFE level. It is used when there is a LFE input but no
-LFE output. It is a value expressed in deciBel, and must
-be in the interval [-32,32].
-
-
-rmvol, rematrix_volume
-Set rematrix volume. Default value is 1.0.
-
-
-rematrix_maxval
-Set maximum output value for rematrixing.
-This can be used to trade off preventing clipping against preventing volume reduction.
-A value of 1.0 prevents clipping.
-
-
-flags, swr_flags
-Set flags used by the converter. Default value is 0.
-
-It supports the following individual flags:
-
-res
-Force resampling. This flag forces resampling to be used even when the
-input and output sample rates match.
-
-
-
-
-dither_scale
-Set the dither scale. Default value is 1.
-
-
-dither_method
-Set dither method. Default value is 0.
-
-Supported values:
-
-‘rectangular ’
-select rectangular dither
-
-‘triangular ’
-select triangular dither
-
-‘triangular_hp ’
-select triangular dither with high pass
-
-‘lipshitz ’
-select lipshitz noise shaping dither
-
-‘shibata ’
-select shibata noise shaping dither
-
-‘low_shibata ’
-select low shibata noise shaping dither
-
-‘high_shibata ’
-select high shibata noise shaping dither
-
-‘f_weighted ’
-select f-weighted noise shaping dither
-
-‘modified_e_weighted ’
-select modified-e-weighted noise shaping dither
-
-‘improved_e_weighted ’
-select improved-e-weighted noise shaping dither
-
-
-
-
-
-resampler
-Set resampling engine. Default value is swr.
-
-Supported values:
-
-‘swr ’
-select the native SW Resampler; filter options precision and cheby are not
-applicable in this case.
-
-‘soxr ’
-select the SoX Resampler (where available); compensation, and filter options
-filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
-case.
-
-
-
-
-filter_size
-For swr only, set resampling filter size, default value is 32.
-
-
-phase_shift
-For swr only, set resampling phase shift, default value is 10, and must be in
-the interval [0,30].
-
-
-linear_interp
-Use Linear Interpolation if set to 1, default value is 0.
-
-
-cutoff
-Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
-value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
-(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
-
-
-precision
-For soxr only, the precision in bits to which the resampled signal will be
-calculated. The default value of 20 (which, with suitable dithering, is
-appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
-value of 28 gives SoX’s ’Very High Quality’.
-
-
-cheby
-For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
-approximation for ’irrational’ ratios. Default value is 0.
-
-
-async
-For swr only, simple 1 parameter audio sync to timestamps using stretching,
-squeezing, filling and trimming. Setting this to 1 will enable filling and
-trimming, larger values represent the maximum amount in samples that the data
-may be stretched or squeezed for each second.
-Default value is 0, thus no compensation is applied to make the samples match
-the audio timestamps.
-
-
-first_pts
-For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
-This allows for padding/trimming at the start of stream. By default, no
-assumption is made about the first frame’s expected pts, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative pts due to encoder delay.
-
-
-min_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger stretching/squeezing/filling or trimming of the
-data to make it match the timestamps. The default is that
-stretching/squeezing/filling and trimming is disabled
-(min_comp = FLT_MAX
).
-
-
-min_hard_comp
-For swr only, set the minimum difference between timestamps and audio data (in
-seconds) to trigger adding/dropping samples to make it match the
-timestamps. This option effectively is a threshold to select between
-hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
-all compensation is by default disabled through min_comp .
-The default is 0.1.
-
-
-comp_duration
-For swr only, set duration (in seconds) over which data is stretched/squeezed
-to make it match the timestamps. Must be a non-negative double float value,
-default value is 1.0.
-
-
-max_soft_comp
-For swr only, set maximum factor by which data is stretched/squeezed to make it
-match the timestamps. Must be a non-negative double float value, default value
-is 0.
-
-
-matrix_encoding
-Select matrixed stereo encoding.
-
-It accepts the following values:
-
-‘none ’
-select none
-
-‘dolby ’
-select Dolby
-
-‘dplii ’
-select Dolby Pro Logic II
-
-
-
-Default value is none
.
-
-
-filter_type
-For swr only, select resampling filter type. This only affects resampling
-operations.
-
-It accepts the following values:
-
-‘cubic ’
-select cubic
-
-‘blackman_nuttall ’
-select Blackman Nuttall Windowed Sinc
-
-‘kaiser ’
-select Kaiser Windowed Sinc
-
-
-
-
-kaiser_beta
-For swr only, set Kaiser Window Beta value. Must be an integer in the
-interval [2,16], default value is 9.
-
-
-output_sample_bits
-For swr only, set number of used output sample bits for dithering. Must be an integer in the
-interval [0,64], default value is 0, which means it’s not used.
-
-
-
-
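-
-
As an illustration (a minimal sketch; the file names and option values are
-arbitrary), some of these options can be passed to the aresample filter
-in option =value form:
-
ffmpeg -i in.wav -af aresample=out_sample_rate=48000:dither_method=triangular_hp out.wav
-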
-
-
22 Scaler Options# TOC
-
-
The video scaler supports the following named options.
-
-
Options may be set by specifying -option value in the
-FFmpeg tools. For programmatic use, they can be set explicitly in the
-SwsContext
options or through the libavutil/opt.h API.
-
-
-
-
-sws_flags
-Set the scaler flags. This is also used to set the scaling
-algorithm. Only a single algorithm should be selected.
-
-It accepts the following values:
-
-‘fast_bilinear ’
-Select fast bilinear scaling algorithm.
-
-
-‘bilinear ’
-Select bilinear scaling algorithm.
-
-
-‘bicubic ’
-Select bicubic scaling algorithm.
-
-
-‘experimental ’
-Select experimental scaling algorithm.
-
-
-‘neighbor ’
-Select nearest neighbor rescaling algorithm.
-
-
-‘area ’
-Select averaging area rescaling algorithm.
-
-
-‘bicublin ’
-Select bicubic scaling algorithm for the luma component, bilinear for
-chroma components.
-
-
-‘gauss ’
-Select Gaussian rescaling algorithm.
-
-
-‘sinc ’
-Select sinc rescaling algorithm.
-
-
-‘lanczos ’
-Select lanczos rescaling algorithm.
-
-
-‘spline ’
-Select natural bicubic spline rescaling algorithm.
-
-
-‘print_info ’
-Enable printing/debug logging.
-
-
-‘accurate_rnd ’
-Enable accurate rounding.
-
-
-‘full_chroma_int ’
-Enable full chroma interpolation.
-
-
-‘full_chroma_inp ’
-Select full chroma input.
-
-
-‘bitexact ’
-Enable bitexact output.
-
-
-
-
-srcw
-Set source width.
-
-
-srch
-Set source height.
-
-
-dstw
-Set destination width.
-
-
-dsth
-Set destination height.
-
-
-src_format
-Set source pixel format (must be expressed as an integer).
-
-
-dst_format
-Set destination pixel format (must be expressed as an integer).
-
-
-src_range
-Select source range.
-
-
-dst_range
-Select destination range.
-
-
-param0, param1
-Set scaling algorithm parameters. The specified values are specific to
-some scaling algorithms and ignored by others. They are floating point
-number values.
-
-
-sws_dither
-Set the dithering algorithm. Accepts one of the following
-values. Default value is ‘auto ’.
-
-
-‘auto ’
-automatic choice
-
-
-‘none ’
-no dithering
-
-
-‘bayer ’
-bayer dither
-
-
-‘ed ’
-error diffusion dither
-
-
-‘a_dither ’
-arithmetic dither, based on addition
-
-
-‘x_dither ’
-arithmetic dither, based on xor (more random/less apparent patterning than
-a_dither).
-
-
-
-
-
-
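-
-
For example (a sketch; the file names and values are arbitrary), a scaling
-algorithm together with accurate rounding can be requested from the FFmpeg
-tools with the sws_flags option:
-
ffmpeg -i in.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd out.mp4
-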
-
-
-
23 Filtering Introduction# TOC
-
-
Filtering in FFmpeg is enabled through the libavfilter library.
-
-
In libavfilter, a filter can have multiple inputs and multiple
-outputs.
-To illustrate the sorts of things that are possible, we consider the
-following filtergraph.
-
-
-
[main]
-input --> split ---------------------> overlay --> output
- | ^
- |[tmp] [flip]|
- +-----> crop --> vflip -------+
-
-
-
This filtergraph splits the input stream in two streams, then sends one
-stream through the crop filter and the vflip filter, before merging it
-back with the other stream by overlaying it on top. You can use the
-following command to achieve this:
-
-
-
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
-
-
-
The result will be that the top half of the video is mirrored
-onto the bottom half of the output video.
-
-
Filters in the same linear chain are separated by commas, and distinct
-linear chains of filters are separated by semicolons. In our example,
-crop,vflip are in one linear chain, split and
-overlay are separately in another. The points where the linear
-chains join are labelled by names enclosed in square brackets. In the
-example, the split filter generates two outputs that are associated to
-the labels [main] and [tmp] .
-
-
The stream sent to the second output of split , labelled as
-[tmp] , is processed through the crop filter, which crops
-away the lower half part of the video, and is then vertically flipped. The
-overlay filter takes as input the first unchanged output of the
-split filter (which was labelled as [main] ), and overlays on its
-lower half the output generated by the crop,vflip filterchain.
-
-
Some filters take a list of parameters as input: they are specified
-after the filter name and an equal sign, and are separated from each other
-by a colon.
-
-
There exist so-called source filters that do not have an
-audio/video input, and sink filters that will not have audio/video
-output.
-
-
-
-
24 graph2dot# TOC
-
-
The graph2dot program included in the FFmpeg tools
-directory can be used to parse a filtergraph description and issue a
-corresponding textual representation in the dot language.
-
-
Invoke the command:
-
tools/graph2dot -h
-
to see how to use graph2dot .
-
-
You can then pass the dot description to the dot program (from
-the graphviz suite of programs) and obtain a graphical representation
-of the filtergraph.
-
-
For example the sequence of commands:
-
-
echo GRAPH_DESCRIPTION | \
-tools/graph2dot -o graph.tmp && \
-dot -Tpng graph.tmp -o graph.png && \
-display graph.png
-
-
-
can be used to create and display an image representing the graph
-described by the GRAPH_DESCRIPTION string. Note that this string must be
-a complete self-contained graph, with its inputs and outputs explicitly defined.
-For example if your command line is of the form:
-
-
ffmpeg -i infile -vf scale=640:360 outfile
-
-
your GRAPH_DESCRIPTION string will need to be of the form:
-
-
nullsrc,scale=640:360,nullsink
-
-
you may also need to set the nullsrc parameters and add a format
-filter in order to simulate a specific input file.
-
-
-
-
25 Filtergraph description# TOC
-
-
A filtergraph is a directed graph of connected filters. It can contain
-cycles, and there can be multiple links between a pair of
-filters. Each link has one input pad on one side connecting it to one
-filter from which it takes its input, and one output pad on the other
-side connecting it to one filter accepting its output.
-
-
Each filter in a filtergraph is an instance of a filter class
-registered in the application, which defines the features and the
-number of input and output pads of the filter.
-
-
A filter with no input pads is called a "source", and a filter with no
-output pads is called a "sink".
-
-
-
25.1 Filtergraph syntax# TOC
-
-
A filtergraph has a textual representation, which is
-recognized by the -filter /-vf and -filter_complex
-options in ffmpeg
and -vf in ffplay
, and by the
-avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
-libavfilter/avfilter.h .
-
-
A filterchain consists of a sequence of connected filters, each one
-connected to the previous one in the sequence. A filterchain is
-represented by a list of ","-separated filter descriptions.
-
-
A filtergraph consists of a sequence of filterchains. A sequence of
-filterchains is represented by a list of ";"-separated filterchain
-descriptions.
-
-
A filter is represented by a string of the form:
-[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
-
-
filter_name is the name of the filter class of which the
-described filter is an instance of, and has to be the name of one of
-the filter classes registered in the program.
-The name of the filter class is optionally followed by a string
-"=arguments ".
-
-
arguments is a string which contains the parameters used to
-initialize the filter instance. It may have one of two forms:
-
- A ’:’-separated list of key=value pairs.
-
- A ’:’-separated list of value . In this case, the keys are assumed to be
-the option names in the order they are declared. E.g. the fade
filter
-declares three options in this order – type , start_frame and
-nb_frames . Then the parameter list in:0:30 means that the value
-in is assigned to the option type , 0 to
-start_frame and 30 to nb_frames (see the sketch after this list).
-
- A ’:’-separated list of mixed direct value and long key=value
-pairs. The direct value must precede the key=value pairs, and
-follow the same constraints order of the previous point. The following
-key=value pairs can be set in any preferred order.
-
-
-
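-
Taking the fade filter mentioned above as an example, the following
-argument lists are equivalent sketches of the three forms:
-
fade=in:0:30
fade=t=in:start_frame=0:nb_frames=30
fade=in:0:nb_frames=30
-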
-
If the option value itself is a list of items (e.g. the format
filter
-takes a list of pixel formats), the items in the list are usually separated by
-’|’.
-
-
The list of arguments can be quoted using the character "’" as initial
-and ending mark, and the character ’\’ for escaping the characters
-within the quoted text; otherwise the argument string is considered
-terminated when the next special character (belonging to the set
-"[]=;,") is encountered.
-
-
The name and arguments of the filter are optionally preceded and
-followed by a list of link labels.
-A link label allows one to name a link and associate it to a filter output
-or input pad. The preceding labels in_link_1
-... in_link_N , are associated to the filter input pads,
-the following labels out_link_1 ... out_link_M , are
-associated to the output pads.
-
-
When two link labels with the same name are found in the
-filtergraph, a link between the corresponding input and output pad is
-created.
-
-
If an output pad is not labelled, it is linked by default to the first
-unlabelled input pad of the next filter in the filterchain.
-For example in the filterchain
-
-
nullsrc, split[L1], [L2]overlay, nullsink
-
-
the split filter instance has two output pads, and the overlay filter
-instance two input pads. The first output pad of split is labelled
-"L1", the first input pad of overlay is labelled "L2", and the second
-output pad of split is linked to the second input pad of overlay,
-which are both unlabelled.
-
-
In a complete filterchain all the unlabelled filter input and output
-pads must be connected. A filtergraph is considered valid if all the
-filter input and output pads of all the filterchains are connected.
-
-
Libavfilter will automatically insert scale filters where format
-conversion is required. It is possible to specify swscale flags
-for those automatically inserted scalers by prepending
-sws_flags=flags ;
-to the filtergraph description.
-
-
Here is a BNF description of the filtergraph syntax:
-
-
NAME ::= sequence of alphanumeric characters and '_'
-LINKLABEL ::= "[" NAME "]"
-LINKLABELS ::= LINKLABEL [LINKLABELS ]
-FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
-FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
-FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
-FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
-
-
-
-
25.2 Notes on filtergraph escaping# TOC
-
-
Filtergraph description composition entails several levels of
-escaping. See (ffmpeg-utils)the "Quoting and escaping"
-section in the ffmpeg-utils(1) manual for more
-information about the employed escaping procedure.
-
-
A first level escaping affects the content of each filter option
-value, which may contain the special character :
used to
-separate values, or one of the escaping characters \'
.
-
-
A second level escaping affects the whole filter description, which
-may contain the escaping characters \'
or the special
-characters [],;
used by the filtergraph description.
-
-
Finally, when you specify a filtergraph on a shell commandline, you
-need to perform a third level escaping for the shell special
-characters contained within it.
-
-
For example, consider the following string to be embedded in
-the drawtext filter description text value:
-
-
this is a 'string': may contain one, or more, special characters
-
-
-
This string contains the '
special escaping character, and the
-:
special character, so it needs to be escaped in this way:
-
-
text=this is a \'string\'\: may contain one, or more, special characters
-
-
-
A second level of escaping is required when embedding the filter
-description in a filtergraph description, in order to escape all the
-filtergraph special characters. Thus the example above becomes:
-
-
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
-
-
(note that in addition to the \'
escaping special characters,
-also ,
needs to be escaped).
-
-
Finally an additional level of escaping is needed when writing the
-filtergraph description in a shell command, which depends on the
-escaping rules of the adopted shell. For example, assuming that
-\
is special and needs to be escaped with another \
, the
-previous string will finally result in:
-
-
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
-
-
-
-
26 Timeline editing# TOC
-
-
Some filters support a generic enable option. For the filters
-supporting timeline editing, this option can be set to an expression which is
-evaluated before sending a frame to the filter. If the evaluation is non-zero,
-the filter will be enabled, otherwise the frame will be sent unchanged to the
-next filter in the filtergraph.
-
-
The expression accepts the following values:
-
-‘t ’
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-‘n ’
-sequential number of the input frame, starting from 0
-
-
-‘pos ’
-the position in the file of the input frame, NAN if unknown
-
-
-‘w ’
-‘h ’
-width and height of the input frame if video
-
-
-
-
Additionally, these filters support an enable command that can be used
-to re-define the expression.
-
-
Like any other filtering option, the enable option follows the same
-rules.
-
-
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
-minutes, and a curves filter starting at 3 seconds:
-
-
smartblur = enable='between(t,10,3*60)',
-curves = enable='gte(t,3)' : preset=cross_process
-
-
-
-
-
27 Audio Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters
.
-The configure output will show the audio filters included in your
-build.
-
-
Below is a description of the currently available audio filters.
-
-
-
27.1 adelay# TOC
-
-
Delay one or more audio channels.
-
-
Samples in delayed channels are filled with silence.
-
-
The filter accepts the following option:
-
-
-delays
-Set list of delays in milliseconds for each channel separated by ’|’.
-At least one delay greater than 0 should be provided.
-Unused delays will be silently ignored. If the number of given delays is
-smaller than the number of channels, all remaining channels will not be delayed.
-
-
-
-
-
27.1.1 Examples# TOC
-
-
- Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
-the second channel (and any other channels that may be present) unchanged.
-
adelay=1500|0|500
-
-
-
27.2 aecho# TOC
-
-
Apply echoing to the input audio.
-
-
Echoes are reflected sound and can occur naturally amongst mountains
-(and sometimes large buildings) when talking or shouting; digital echo
-effects emulate this behaviour and are often used to help fill out the
-sound of a single instrument or vocal. The time difference between the
-original signal and the reflection is the delay
, and the
-loudness of the reflected signal is the decay
.
-Multiple echoes can have different delays and decays.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain of reflected signal. Default is 0.6
.
-
-
-out_gain
-Set output gain of reflected signal. Default is 0.3
.
-
-
-delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
-Default is 1000
.
-
-
-decays
-Set list of loudnesses of reflected signals separated by ’|’.
-Allowed range for each decay
is (0 - 1.0]
.
-Default is 0.5
.
-
-
-
-
-
27.2.1 Examples# TOC
-
-
- Make it sound as if there are twice as many instruments as are actually playing:
-
aecho=0.8:0.88:60:0.4
-
- If the delay is very short, then it sounds like a (metallic) robot playing music:
-
aecho=0.8:0.88:6:0.4
-
- A longer delay will sound like an open air concert in the mountains:
-
-
aecho=0.8:0.9:1000:0.3
-
-
- Same as above but with one more mountain:
-
-
aecho=0.8:0.9:1000|1800:0.3|0.25
-
-
-
-
-
27.3 aeval# TOC
-
-
Modify an audio signal according to the specified expressions.
-
-
This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-
It accepts the following parameters:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-
-channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to ‘same ’, it will
-use by default the same input channel layout.
-
-
-
-
Each expression in exprs can contain the following constants and functions:
-
-
-ch
-channel number of the current expression
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-s
-sample rate
-
-
-t
-time of the evaluated sample expressed in seconds
-
-
-nb_in_channels
-nb_out_channels
-input and output number of channels
-
-
-val(CH)
-the value of input channel with number CH
-
-
-
-
Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-
-
27.3.1 Examples# TOC
-
-
- Half volume:
-
-
aeval=val(ch)/2:c=same
-
-
- Invert phase of the second channel:
-
aeval=val(0)|-val(1)
-
-
-
27.4 afade# TOC
-
-
Apply fade-in/out effect to input audio.
-
-
A description of the accepted parameters follows.
-
-
-type, t
-Specify the effect type, can be either in
for fade-in, or
-out
for a fade-out effect. Default is in
.
-
-
-start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-
-nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-
-start_time, st
-Specify the start time of the fade effect. Default is 0.
-The value must be specified as a time duration; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-If set this option is used instead of start_sample .
-
-
-duration, d
-Specify the duration of the fade effect. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-At the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence.
-By default the duration is determined by nb_samples .
-If set this option is used instead of nb_samples .
-
-
-curve
-Set curve for fade transition.
-
-It accepts the following values:
-
-tri
-select triangular, linear slope (default)
-
-qsin
-select quarter of sine wave
-
-hsin
-select half of sine wave
-
-esin
-select exponential sine wave
-
-log
-select logarithmic
-
-par
-select inverted parabola
-
-qua
-select quadratic
-
-cub
-select cubic
-
-squ
-select square root
-
-cbr
-select cubic root
-
-
-
-
-
-
-
27.4.1 Examples# TOC
-
-
- Fade in first 15 seconds of audio:
-
afade=t=in:ss=0:d=15
-
- Fade out last 25 seconds of a 900 seconds audio:
-
-
afade=t=out:st=875:d=25
-
-
-
-
-
27.5 aformat# TOC
-
-
Set output format constraints for the input audio. The framework will
-negotiate the most appropriate format to minimize conversions.
-
-
It accepts the following parameters:
-
-sample_fmts
-A ’|’-separated list of requested sample formats.
-
-
-sample_rates
-A ’|’-separated list of requested sample rates.
-
-
-channel_layouts
-A ’|’-separated list of requested channel layouts.
-
-See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
-for the required syntax.
-
-
-
-
If a parameter is omitted, all values are allowed.
-
-
Force the output to either unsigned 8-bit or signed 16-bit stereo
-
-
aformat=sample_fmts=u8|s16:channel_layouts=stereo
-
-
-
-
27.6 allpass# TOC
-
-
Apply a two-pole all-pass filter with central frequency (in Hz)
-frequency , and filter-width width .
-An all-pass filter changes the audio’s frequency to phase relationship
-without changing its frequency to amplitude relationship.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
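-As a minimal sketch (the frequency and width values here are illustrative,
-not defaults), an all-pass at 700 Hz with a Q of 0.707 could be written as:
-
allpass=f=700:width_type=q:w=0.707
-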
-
27.7 amerge# TOC
-
-
Merge two or more audio streams into a single multi-channel stream.
-
-
The filter accepts the following options:
-
-
-inputs
-Set the number of inputs. Default is 2.
-
-
-
-
-
If the channel layouts of the inputs are disjoint, and therefore compatible,
-the channel layout of the output will be set accordingly and the channels
-will be reordered as necessary. If the channel layouts of the inputs are not
-disjoint, the output will have all the channels of the first input then all
-the channels of the second input, in that order, and the channel layout of
-the output will be the default value corresponding to the total number of
-channels.
-
-
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-
-On the other hand, if both inputs are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-
-All inputs must have the same sample rate and format.
-
-
If inputs do not have the same duration, the output will stop with the
-shortest.
-
-
-
27.7.1 Examples# TOC
-
-
- Merge two mono files into a stereo stream:
-
-
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-
-
- Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
-
-
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-
-
-
-
-
27.8 amix# TOC
-
-
Mixes multiple audio inputs into a single output.
-
-
Note that this filter only supports float samples (the amerge
-and pan audio filters support many formats). If the amix
-input has integer samples then aresample will be automatically
-inserted to perform the conversion to float samples.
-
-
For example
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-
-
will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-
It accepts the following parameters:
-
-inputs
-The number of inputs. If unspecified, it defaults to 2.
-
-
-duration
-How to determine the end-of-stream.
-
-longest
-The duration of the longest input. (default)
-
-
-shortest
-The duration of the shortest input.
-
-
-first
-The duration of the first input.
-
-
-
-
-
-dropout_transition
-The transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-
-
-
-
-
27.9 anull# TOC
-
-
Pass the audio source unchanged to the output.
-
-
-
27.10 apad# TOC
-
-
Pad the end of an audio stream with silence.
-
-
-This can be used together with ffmpeg -shortest to
-extend audio streams to the same length as the video stream.
-
-
A description of the accepted options follows.
-
-
-packet_size
-Set silence packet size. Default value is 4096.
-
-
-pad_len
-Set the number of samples of silence to add to the end. After the
-value is reached, the stream is terminated. This option is mutually
-exclusive with whole_len .
-
-
-whole_len
-Set the minimum total number of samples in the output audio stream. If
-the value is longer than the input audio length, silence is added to
-the end, until the value is reached. This option is mutually exclusive
-with pad_len .
-
-
-
-
If neither the pad_len nor the whole_len option is
-set, the filter will add silence to the end of the input stream
-indefinitely.
-
-
-
27.10.1 Examples# TOC
-
-
- Add 1024 samples of silence to the end of the input:
-
-
- Make sure the audio output will contain at least 10000 samples, pad
-the input with silence if required:
-
-
- Use ffmpeg to pad the audio input with silence, so that the
-video stream will always be the shortest and will be converted
-until the end in the output file when using the shortest option:
-
-
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
-
-
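-The first two example items above appear to have lost their commands; forms
-matching their descriptions are:
-
apad=pad_len=1024
apad=whole_len=10000
-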
-
-
-
27.11 aphaser# TOC
-
Add a phasing effect to the input audio.
-
-
A phaser filter creates series of peaks and troughs in the frequency spectrum.
-The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-
A description of the accepted parameters follows.
-
-
-in_gain
-Set input gain. Default is 0.4.
-
-
-out_gain
-Set output gain. Default is 0.74
-
-
-delay
-Set delay in milliseconds. Default is 3.0.
-
-
-decay
-Set decay. Default is 0.4.
-
-
-speed
-Set modulation speed in Hz. Default is 0.5.
-
-
-type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-
-‘triangular, t ’
-‘sinusoidal, s ’
-
-
-
-
-
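-As a sketch (parameter values are illustrative), a faster triangular sweep
-could be requested with:
-
aphaser=type=t:speed=2:decay=0.6
-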
-
27.12 aresample# TOC
-
-
Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-
This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, do a combination of both or do neither.
-
-
The filter accepts the syntax
-[sample_rate :]resampler_options , where sample_rate
-expresses a sample rate and resampler_options is a list of
-key =value pairs, separated by ":". See the
-ffmpeg-resampler manual for the complete list of supported options.
-
-
-
27.12.1 Examples# TOC
-
-
- Resample the input audio to 44100Hz:
-
-
- Stretch/squeeze samples to the given timestamps, with a maximum of 1000
-samples per second compensation:
-
-
-
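-The two example items above appear to have lost their commands; forms
-matching their descriptions are:
-
aresample=44100
aresample=async=1000
-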
-
-
27.13 asetnsamples# TOC
-
-
Set the number of samples per each output audio frame.
-
-
The last output packet may contain a different number of samples, as
-the filter will flush all the remaining samples when the input audio
-signals its end.
-
-
The filter accepts the following options:
-
-
-nb_out_samples, n
-Set the number of samples per each output audio frame. The number is
-intended as the number of samples per each channel.
-Default value is 1024.
-
-
-pad, p
-If set to 1, the filter will pad the last audio frame with zeroes, so
-that the last frame will contain the same number of samples as the
-previous ones. Default value is 1.
-
-
-
-
For example, to set the number of per-frame samples to 1234 and
-disable padding for the last frame, use:
-
-
asetnsamples=n=1234:p=0
-
-
-
-
27.14 asetrate# TOC
-
-
Set the sample rate without altering the PCM data.
-This will result in a change of speed and pitch.
-
-
The filter accepts the following options:
-
-
-sample_rate, r
-Set the output sample rate. Default is 44100 Hz.
-
-
-
-
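-For instance, to retag the stream as 48000 Hz (speeding up and raising the
-pitch of 44100 Hz input accordingly), a sketch would be:
-
asetrate=r=48000
-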
-
27.15 ashowinfo# TOC
-
-
Show a line containing various information for each input audio frame.
-The input audio is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The presentation timestamp of the input frame, in time base units; the time base
-depends on the filter input pad, and is usually 1/sample_rate .
-
-
-pts_time
-The presentation timestamp of the input frame in seconds.
-
-
-pos
-The position of the frame in the input stream; -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic audio).
-
-
-fmt
-The sample format.
-
-
-chlayout
-The channel layout.
-
-
-rate
-The sample rate for the audio frame.
-
-
-nb_samples
-The number of samples (per channel) in the frame.
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
-audio, the data is treated as if all the planes were concatenated.
-
-
-plane_checksums
-A list of Adler-32 checksums for each data plane.
-
-
-
-
-
27.16 astats# TOC
-
-
Display time domain statistical information about the audio channels.
-Statistics are calculated and displayed for each audio channel and,
-where applicable, an overall figure is also given.
-
-
It accepts the following option:
-
-length
-Short window length in seconds, used for peak and trough RMS measurement.
-Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10].
-
-
-
-
A description of each shown parameter follows:
-
-
-DC offset
-Mean amplitude displacement from zero.
-
-
-Min level
-Minimal sample level.
-
-
-Max level
-Maximal sample level.
-
-
-Peak level dB
-RMS level dB
-Standard peak and RMS level measured in dBFS.
-
-
-RMS peak dB
-RMS trough dB
-Peak and trough values for RMS level measured over a short window.
-
-
-Crest factor
-Standard ratio of peak to RMS level (note: not in dB).
-
-
-Flat factor
-Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
-(i.e. either Min level or Max level ).
-
-
-Peak count
-Number of occasions (not the number of samples) that the signal attained either
-Min level or Max level .
-
-
-
-
-
27.17 astreamsync# TOC
-
-
Forward two audio streams and control the order the buffers are forwarded.
-
-
The filter accepts the following options:
-
-
-expr, e
-Set the expression deciding which stream should be
-forwarded next: if the result is negative, the first stream is forwarded; if
-the result is positive or zero, the second stream is forwarded. It can use
-the following variables:
-
-
-b1 b2
-number of buffers forwarded so far on each stream
-
-s1 s2
-number of samples forwarded so far on each stream
-
-t1 t2
-current timestamp of each stream
-
-
-
-The default value is t1-t2, which means to always forward the stream
-that has a smaller timestamp.
-
-
-
-
-
27.17.1 Examples# TOC
-
-
Stress-test amerge by randomly sending buffers on the wrong
-input, while avoiding too much of a desynchronization:
-
-
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
-[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
-[a2] [b2] amerge
-
-
-
-
27.18 asyncts# TOC
-
-
Synchronize audio data with timestamps by squeezing/stretching it and/or
-dropping samples/adding silence when needed.
-
-
This filter is not built by default, please use aresample to do squeezing/stretching.
-
-
It accepts the following parameters:
-
-compensate
-Enable stretching/squeezing the data to make it match the timestamps. Disabled
-by default. When disabled, time gaps are covered with silence.
-
-
-min_delta
-The minimum difference between timestamps and audio data (in seconds) to trigger
-adding/dropping samples. The default value is 0.1. If you get an imperfect
-sync with this filter, try setting this parameter to 0.
-
-
-max_comp
-The maximum compensation in samples per second. Only relevant with compensate=1.
-The default value is 500.
-
-
-first_pts
-Assume that the first PTS should be this value. The time base is 1 / sample
-rate. This allows for padding/trimming at the start of the stream. By default,
-no assumption is made about the first frame’s expected PTS, so no padding or
-trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream or to trim any samples
-with a negative PTS due to encoder delay.
-
-
-
-
-
-
27.19 atempo# TOC
-
-
Adjust audio tempo.
-
-
The filter accepts exactly one parameter, the audio tempo. If not
-specified then the filter will assume nominal 1.0 tempo. Tempo must
-be in the [0.5, 2.0] range.
-
-
-
27.19.1 Examples# TOC
-
-
- Slow down audio to 80% tempo:
-
-
- To speed up audio to 125% tempo:
-
-
-
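-The two example items above appear to have lost their commands; forms
-matching their descriptions are:
-
atempo=0.8
atempo=1.25
-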
-
-
27.20 atrim# TOC
-
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Timestamp (in seconds) of the start of the section to keep. I.e. the audio
-sample with the timestamp start will be the first sample in the output.
-
-
-end
-Specify time of the first audio sample that will be dropped, i.e. the
-audio sample immediately preceding the one with the timestamp end will be
-the last sample in the output.
-
-
-start_pts
-Same as start , except this option sets the start timestamp in samples
-instead of seconds.
-
-
-end_pts
-Same as end , except this option sets the end timestamp in samples instead
-of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_sample
-The number of the first sample that should be output.
-
-
-end_sample
-The number of the first sample that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _sample options simply count the
-samples that pass through the filter. So start/end_pts and start/end_sample will
-give different results when the timestamps are wrong, inexact or do not start at
-zero. Also note that this filter does not modify the timestamps. If you wish
-to have the output timestamps start at zero, insert the asetpts filter after the
-atrim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all samples that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple atrim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -af atrim=60:120
-
-
- Keep only the first 1000 samples:
-
-
ffmpeg -i INPUT -af atrim=end_sample=1000
-
-
-
-
-
-
27.21 bandpass# TOC
-
-
Apply a two-pole Butterworth band-pass filter with central
-frequency frequency , and (3dB-point) band-width width.
-The csg option selects a constant skirt gain (peak gain = Q)
-instead of the default: constant 0dB peak gain.
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-csg
-Constant skirt gain if set to 1. Defaults to 0.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
27.22 bandreject# TOC
-
-
Apply a two-pole Butterworth band-reject filter with central
-frequency frequency , and (3dB-point) band-width width .
-The filter rolls off at 6dB per octave (20dB per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency. Default is 3000.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-
-
-
27.23 bass# TOC
-
-
Boost or cut the bass (lower) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at 0 Hz. Its useful range is about -20
-(for a large cut) to +20 (for a large boost).
-Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 100 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
-
27.24 biquad# TOC
-
-
Apply a biquad IIR filter with the given coefficients.
-Where b0 , b1 , b2 and a0 , a1 , a2
-are the numerator and denominator coefficients respectively.
-
-
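-As a sketch of the option syntax only (these coefficient values are arbitrary
-placeholders, not a meaningful filter design):
-
biquad=b0=0.4:b1=0.2:b2=0.4:a0=1:a1=-0.6:a2=0.2
-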
-
27.25 bs2b# TOC
-
Bauer stereo to binaural transformation, which improves headphone listening of
-stereo audio records.
-
-
It accepts the following parameters:
-
-profile
-Pre-defined crossfeed level.
-
-default
-Default level (fcut=700, feed=50).
-
-
-cmoy
-Chu Moy circuit (fcut=700, feed=60).
-
-
-jmeier
-Jan Meier circuit (fcut=650, feed=95).
-
-
-
-
-
-fcut
-Cut frequency (in Hz).
-
-
-feed
-Feed level (in Hz).
-
-
-
-
-
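-As a sketch, the Chu Moy preset described above could be selected with:
-
bs2b=profile=cmoy
-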
-
27.26 channelmap# TOC
-
-
Remap input channels to new locations.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the output stream.
-
-
-map
-Map channels from input to output. The argument is a ’|’-separated list of
-mappings, each in the in_channel-out_channel or
-in_channel form. in_channel can be either the name of the input
-channel (e.g. FL for front left) or its index in the input channel layout.
-out_channel is the name of the output channel or its index in the output
-channel layout. If out_channel is not given then it is implicitly an
-index, starting with zero and increasing by one for each mapping.
-
-
-
-
If no mapping is present, the filter will implicitly map input channels to
-output channels, preserving indices.
-
-
For example, assuming a 5.1+downmix input MOV file,
-
-
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
-
-
will create an output WAV file tagged as stereo from the downmix channels of
-the input.
-
-
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
-
-
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
-
-
-
-
27.27 channelsplit# TOC
-
-
Split each channel from an input audio stream into a separate output stream.
-
-
It accepts the following parameters:
-
-channel_layout
-The channel layout of the input stream. The default is "stereo".
-
-
-
-
For example, assuming a stereo input MP3 file,
-
-
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
-
-
will create an output Matroska file with two audio streams, one containing only
-the left channel and the other the right channel.
-
-
Split a 5.1 WAV file into per-channel files:
-
-
ffmpeg -i in.wav -filter_complex
-'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
--map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
-front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
-side_right.wav
-
-
-
-
27.28 compand# TOC
-
Compress or expand the audio’s dynamic range.
-
-
It accepts the following parameters:
-
-
-attacks
-decays
-A list of times in seconds for each channel over which the instantaneous level
-of the input signal is averaged to determine its volume. attacks refers to
-increase of volume and decays refers to decrease of volume. For most
-situations, the attack time (response to the audio getting louder) should be
-shorter than the decay time, because the human ear is more sensitive to sudden
-loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
-a typical value for decay is 0.8 seconds.
-
-
-points
-A list of points for the transfer function, specified in dB relative to the
-maximum possible signal amplitude. Each key points list must be defined using
-the following syntax: x0/y0|x1/y1|x2/y2|.... or x0/y0 x1/y1 x2/y2 ....
-
-The input values must be in strictly increasing order but the transfer function
-does not have to be monotonically rising. The point 0/0 is assumed but
-may be overridden (by 0/out-dBn). Typical values for the transfer
-function are -70/-70|-60/-20.
-
-
-soft-knee
-Set the curve radius in dB for all joints. It defaults to 0.01.
-
-
-gain
-Set the additional gain in dB to be applied at all points on the transfer
-function. This allows for easy adjustment of the overall gain.
-It defaults to 0.
-
-
-volume
-Set an initial volume, in dB, to be assumed for each channel when filtering
-starts. This permits the user to supply a nominal level initially, so that, for
-example, a very large gain is not applied to initial signal levels before the
-companding has begun to operate. A typical value for audio which is initially
-quiet is -90 dB. It defaults to 0.
-
-
-delay
-Set a delay, in seconds. The input audio is analyzed immediately, but audio is
-delayed before being fed to the volume adjuster. Specifying a delay
-approximately equal to the attack/decay times allows the filter to effectively
-operate in predictive rather than reactive mode. It defaults to 0.
-
-
-
-
-
-
27.28.1 Examples# TOC
-
-
- Make music with both quiet and loud passages suitable for listening to in a
-noisy environment:
-
-
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
-
-
- A noise gate for when the noise is at a lower level than the signal:
-
-
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
-
-
- Here is another noise gate, this time for when the noise is at a higher level
-than the signal (making it, in some ways, similar to squelch):
-
-
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
-
-
-
-
-
27.29 earwax# TOC
-
-
Make audio easier to listen to on headphones.
-
-
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
-so that when listened to on headphones the stereo image is moved from
-inside your head (standard for headphones) to outside and in front of
-the listener (standard for speakers).
-
-
Ported from SoX.
-
-
-
27.30 equalizer# TOC
-
-
Apply a two-pole peaking equalisation (EQ) filter. With this
-filter, the signal-level at and around a selected frequency can
-be increased or decreased, whilst (unlike bandpass and bandreject
-filters) that at all other frequencies is unchanged.
-
-
In order to produce complex equalisation curves, this filter can
-be given several times, each with a different central frequency.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the filter’s central frequency in Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-
-
-gain, g
-Set the required gain or attenuation in dB.
-Beware of clipping when using a positive gain.
-
-
-
-
-
27.30.1 Examples# TOC
-
- Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
-
-
equalizer=f=1000:width_type=h:width=200:g=-10
-
-
- Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
-
-
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
-
-
-
-
-
27.31 flanger# TOC
-
Apply a flanging effect to the audio.
-
-
The filter accepts the following options:
-
-
-delay
-Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
-
-
-depth
-Set added sweep delay in milliseconds. Range from 0 to 10. Default value is 2.
-
-
-regen
-Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
-Default value is 0.
-
-
-width
-Set percentage of delayed signal mixed with original. Range from 0 to 100.
-Default value is 71.
-
-
-speed
-Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
-
-
-shape
-Set swept wave shape, can be triangular or sinusoidal .
-Default value is sinusoidal .
-
-
-phase
-Set swept wave percentage-shift for multi channel. Range from 0 to 100.
-Default value is 25.
-
-
-interp
-Set delay-line interpolation, linear or quadratic .
-Default is linear .
-
-
-
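-As a sketch (values are illustrative and stay within the stated ranges), a
-deeper, faster flange could be requested with:
-
flanger=delay=10:depth=5:speed=2
-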
-
-
27.32 highpass# TOC
-
-
Apply a high-pass filter with 3dB point frequency.
-The filter can be either single-pole, or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 3000.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
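-As a sketch (the cutoff value is illustrative), a 200 Hz high-pass with the
-default two poles could be written as:
-
highpass=f=200
-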
-
-
27.33 join# TOC
-
-
Join multiple input streams into one multi-channel stream.
-
-
It accepts the following parameters:
-
-inputs
-The number of input streams. It defaults to 2.
-
-
-channel_layout
-The desired output channel layout. It defaults to stereo.
-
-
-map
-Map channels from inputs to output. The argument is a ’|’-separated list of
-mappings, each in the input_idx .in_channel -out_channel
-form. input_idx is the 0-based index of the input stream. in_channel
-can be either the name of the input channel (e.g. FL for front left) or its
-index in the specified input stream. out_channel is the name of the output
-channel.
-
-
-
-
The filter will attempt to guess the mappings when they are not specified
-explicitly. It does so by first trying to find an unused matching input channel
-and if that fails it picks the first unused input channel.
-
-
Join 3 inputs (with properly set channel layouts):
-
-
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
-
-
-
Build a 5.1 output from 6 single-channel streams:
-
-
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
-'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
-out
-
-
-
-
27.34 ladspa# TOC
-
-
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-ladspa.
-
-
-file, f
-Specifies the name of LADSPA plugin library to load. If the environment
-variable LADSPA_PATH is defined, the LADSPA plugin is searched in
-each one of the directories specified by the colon separated list in
-LADSPA_PATH, otherwise in the standard LADSPA paths, which are in
-this order: HOME/.ladspa/lib/, /usr/local/lib/ladspa/,
-/usr/lib/ladspa/.
-
-
-plugin, p
-Specifies the plugin within the library. Some libraries contain only
-one plugin, but others contain many of them. If this is not set, the filter
-will list all available plugins within the specified library.
-
-
-controls, c
-Set the ’|’ separated list of controls which are zero or more floating point
-values that determine the behavior of the loaded plugin (for example delay,
-threshold or gain).
-Controls need to be defined using the following syntax:
-c0=value0 |c1=value1 |c2=value2 |..., where
-valuei is the value set on the i -th control.
-If controls is set to help, all available controls and
-their valid ranges are printed.
-
-
-sample_rate, s
-Specify the sample rate; defaults to 44100. Only used if the plugin has
-zero inputs.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame, default
-is 1024. Only used if the plugin has zero inputs.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified duration,
-as the generated audio is always cut at the end of a complete frame.
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-Only used if the plugin has zero inputs.
-
-
-
-
-
-
27.34.1 Examples# TOC
-
-
- List all available plugins within amp (LADSPA example plugin) library:
-
-
- List all available controls and their valid ranges for vcf_notch
-plugin from VCF library:
-
-
ladspa=f=vcf:p=vcf_notch:c=help
-
-
- Simulate low quality audio equipment using Computer Music Toolkit
-(CMT) plugin library:
-
-
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
-
-
- Add reverberation to the audio using TAP-plugins
-(Tom’s Audio Processing plugins):
-
-
ladspa=file=tap_reverb:tap_reverb
-
-
- Generate white noise, with 0.2 amplitude:
-
-
ladspa=file=cmt:noise_source_white:c=c0=.2
-
-
- Generate 20 bpm clicks using plugin C* Click - Metronome from the
-C* Audio Plugin Suite (CAPS) library:
-
-
ladspa=file=caps:Click:c=c1=20
-
-
- Apply C* Eq10X2 - Stereo 10-band equaliser effect:
-
-
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
-
-
-
-
-
27.34.2 Commands# TOC
-
-
This filter supports the following commands:
-
-cN
-Modify the N -th control value.
-
-If the specified value is not valid, it is ignored and prior one is kept.
-
-
-
-
-
27.35 lowpass# TOC
-
-
Apply a low-pass filter with 3dB point frequency.
-The filter can be either single-pole or double-pole (the default).
-The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set frequency in Hz. Default is 500.
-
-
-poles, p
-Set number of poles. Default is 2.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Specify the band-width of a filter in width_type units.
-Applies only to double-pole filter.
-The default is 0.707q and gives a Butterworth response.
-
-
-
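-As a sketch (the cutoff value is illustrative), a single-pole low-pass at
-3000 Hz could be written as:
-
lowpass=f=3000:poles=1
-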
-
-
27.36 pan# TOC
-
-
Mix channels with specific gain levels. The filter accepts the output
-channel layout followed by a set of channels definitions.
-
-
This filter is also designed to efficiently remap the channels of an audio
-stream.
-
-
The filter accepts parameters of the form:
-"l |outdef |outdef |..."
-
-
-l
-output channel layout or number of channels
-
-
-outdef
-output channel specification, of the form:
-"out_name =[gain *]in_name [+[gain *]in_name ...]"
-
-
-out_name
-output channel to define, either a channel name (FL, FR, etc.) or a channel
-number (c0, c1, etc.)
-
-
-gain
-multiplicative coefficient for the channel, 1 leaving the volume unchanged
-
-
-in_name
-input channel to use, see out_name for details; it is not possible to mix
-named and numbered input channels
-
-
-
-
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
-that specification will be renormalized so that the total is 1, thus
-avoiding clipping noise.
-
-
-
27.36.1 Mixing examples# TOC
-
-
For example, if you want to down-mix from stereo to mono, but with a bigger
-factor for the left channel:
-
-
pan=1c|c0=0.9*c0+0.1*c1
-
-
-
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
-7-channels surround:
-
-
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
-
-
-
-Note that ffmpeg integrates a default down-mix (and up-mix) system
-that should be preferred (see "-ac" option) unless you have very specific
-needs.
-
-
-
27.36.2 Remapping examples# TOC
-
-
The channel remapping will be effective if, and only if:
-
-
- gain coefficients are zeroes or ones,
- there is only one input per output channel,
-
-
-
If all these conditions are satisfied, the filter will notify the user ("Pure
-channel mapping detected"), and use an optimized and lossless method to do the
-remapping.
-
-
For example, if you have a 5.1 source and want a stereo audio stream by
-dropping the extra channels:
-
-
pan="stereo| c0=FL | c1=FR"
-
-
-
Given the same source, you can also switch front left and front right channels
-and keep the input channel layout:
-
-
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
-
-
-
If the input is a stereo audio stream, you can mute the front left channel (and
-still keep the stereo channel layout) with:
-
-
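-The command for this case appears to have been lost; a form matching the
-description (unspecified output channels stay silent) is:
-
pan="stereo| c1=c1"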
-
Still with a stereo audio stream input, you can copy the right channel in both
-front left and right:
-
-
pan="stereo| c0=FR | c1=FR"
-
-
-
-
27.37 replaygain# TOC
-
-
ReplayGain scanner filter. This filter takes an audio stream as an input and
-outputs it unchanged.
-At the end of filtering it displays track_gain and track_peak.
-
-
-
27.38 resample# TOC
-
-
Convert the audio sample format, sample rate and channel layout. It is
-not meant to be used directly.
-
-
-
27.39 silencedetect# TOC
-
-
Detect silence in an audio stream.
-
-
This filter logs a message when it detects that the input audio volume is less
-or equal to a noise tolerance value for a duration greater or equal to the
-minimum detected noise duration.
-
-
The printed times and duration are expressed in seconds.
-
-
The filter accepts the following options:
-
-
-duration, d
-Set silence duration until notification (default is 2 seconds).
-
-
-noise, n
-Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
-specified value) or amplitude ratio. Default is -60dB, or 0.001.
-
-
-
-
-
27.39.1 Examples# TOC
-
-
- Detect 5 seconds of silence with -50dB noise tolerance:
-
-
silencedetect=n=-50dB:d=5
-
-
- Complete example with ffmpeg
to detect silence with 0.0001 noise
-tolerance in silence.mp3 :
-
-
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
-
-
-
-
-
27.40 silenceremove# TOC
-
-
Remove silence from the beginning, middle or end of the audio.
-
-
The filter accepts the following options:
-
-
-start_periods
-This value is used to indicate if audio should be trimmed at beginning of
-the audio. A value of zero indicates no silence should be trimmed from the
-beginning. When specifying a non-zero value, it trims audio up until it
-finds non-silence. Normally, when trimming silence from beginning of audio
-the start_periods will be 1 but it can be increased to higher
-values to trim all audio up to a specific count of non-silence periods.
-Default value is 0.
-
-
-start_duration
-Specify the amount of time that non-silence must be detected before it stops
-trimming audio. By increasing the duration, bursts of noises can be treated
-as silence and trimmed off. Default value is 0.
-
-
-start_threshold
-This indicates what sample value should be treated as silence. For digital
-audio, a value of 0 may be fine but for audio recorded from analog,
-you may wish to increase the value to account for background noise.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-stop_periods
-Set the count for trimming silence from the end of audio.
-To remove silence from the middle of a file, specify a stop_periods
-that is negative. This value is then treated as a positive value and is
-used to indicate the effect should restart processing as specified by
-start_periods , making it suitable for removing periods of silence
-in the middle of the audio.
-Default value is 0.
-
-
-stop_duration
-Specify a duration of silence that must exist before audio is not copied any
-more. By specifying a higher duration, silence that is wanted can be left in
-the audio.
-Default value is 0.
-
-
-stop_threshold
-This is the same as start_threshold but for trimming silence from
-the end of audio.
-Can be specified in dB (in case "dB" is appended to the specified value)
-or amplitude ratio. Default value is 0.
-
-
-leave_silence
-This indicates that stop_duration length of audio should be left intact
-at the beginning of each period of silence.
-This is useful, for example, if you want to shorten long pauses between
-words without removing them completely. Default value is 0.
-
-
-
-
-
-
27.40.1 Examples# TOC
-
-
- The following example shows how this filter can be used to start a recording
-that does not contain the delay at the start which usually occurs between
-pressing the record button and the start of the performance:
-
-
silenceremove=1:5:0.02
-
-
-
-
-
27.41 treble# TOC
-
-
Boost or cut treble (upper) frequencies of the audio using a two-pole
-shelving filter with a response similar to that of a standard
-hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
-
-
The filter accepts the following options:
-
-
-gain, g
-Give the gain at whichever is the lower of ~22 kHz and the
-Nyquist frequency. Its useful range is about -20 (for a large cut)
-to +20 (for a large boost). Beware of clipping when using a positive gain.
-
-
-frequency, f
-Set the filter’s central frequency and so can be used
-to extend or reduce the frequency range to be boosted or cut.
-The default value is 3000 Hz.
-
-
-width_type
-Set method to specify band-width of filter.
-
-h
-Hz
-
-q
-Q-Factor
-
-o
-octave
-
-s
-slope
-
-
-
-
-width, w
-Determine how steep the filter’s shelf transition is.
-
-
-
-
-
27.42 volume# TOC
-
-
Adjust the input audio volume.
-
-
It accepts the following parameters:
-
-volume
-Set audio volume expression.
-
-Output values are clipped to the maximum value.
-
-The output audio volume is given by the relation:
-
-
output_volume = volume * input_volume
-
-
-The default value for volume is "1.0".
-
-
-precision
-This parameter represents the mathematical precision.
-
-It determines which input sample formats will be allowed, which affects the
-precision of the volume scaling.
-
-
-fixed
-8-bit fixed-point; this limits input sample format to U8, S16, and S32.
-
-float
-32-bit floating-point; this limits input sample format to FLT. (default)
-
-double
-64-bit floating-point; this limits input sample format to DBL.
-
-
-
-
-replaygain
-Choose the behaviour on encountering ReplayGain side data in input frames.
-
-
-drop
-Remove ReplayGain side data, ignoring its contents (the default).
-
-
-ignore
-Ignore ReplayGain side data, but leave it in the frame.
-
-
-track
-Prefer the track gain, if present.
-
-
-album
-Prefer the album gain, if present.
-
-
-
-
-replaygain_preamp
-Pre-amplification gain in dB to apply to the selected replaygain gain.
-
-Default value for replaygain_preamp is 0.0.
-
-
-eval
-Set when the volume expression is evaluated.
-
-It accepts the following values:
-
-‘once ’
-only evaluate expression once during the filter initialization, or
-when the ‘volume ’ command is sent
-
-
-‘frame ’
-evaluate expression for each incoming frame
-
-
-
-Default value is ‘once ’.
-
-
-
-
The volume expression can contain the following parameters.
-
-
-n
-frame number (starting at zero)
-
-nb_channels
-number of channels
-
-nb_consumed_samples
-number of samples consumed by the filter
-
-nb_samples
-number of samples in the current frame
-
-pos
-original frame position in the file
-
-pts
-frame PTS
-
-sample_rate
-sample rate
-
-startpts
-PTS at start of stream
-
-startt
-time at start of stream
-
-t
-frame time
-
-tb
-timestamp timebase
-
-volume
-last set volume value
-
-
-
-
Note that when eval is set to ‘once ’ only the
-sample_rate and tb variables are available, all other
-variables will evaluate to NAN.
-
-
-
27.42.1 Commands# TOC
-
-
This filter supports the following commands:
-
-volume
-Modify the volume expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-replaygain_noclip
-Prevent clipping by limiting the gain applied.
-
-Default value for replaygain_noclip is 1.
-
-
-
-
-
-
27.42.2 Examples# TOC
-
-
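-The examples for this section appear to have been lost in extraction; two
-sketches consistent with the options described above halve the volume and
-apply a 6 dB boost with fixed-point precision:
-
volume=volume=0.5
volume=volume=6dB:precision=fixed
-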
-
-
-
27.43 volumedetect# TOC
-
-
-Detect the volume of the input audio.
-
-
The filter has no parameters. The input is not modified. Statistics about
-the volume will be printed in the log when the end of the input stream is reached.
-
-
In particular it will show the mean volume (root mean square), maximum
-volume (on a per-sample basis), and the beginning of a histogram of the
-registered volume values (from the maximum value to a cumulated 1/1000 of
-the samples).
-
-
All volumes are in decibels relative to the maximum PCM value.
-
-
-
27.43.1 Examples# TOC
-
-
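-A typical invocation (a sketch; INPUT stands for your own file) discards the
-decoded output and only prints the statistics:
-
ffmpeg -i INPUT -af volumedetect -f null -
-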
Here is an excerpt of the output:
-
-
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
-[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
-[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
-[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
-[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
-[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
-[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
-[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
-[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
-
-
-
It means that:
-
- The mean square energy is approximately -27 dB, or 10^-2.7.
- The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
- There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
-
-
-
In other words, raising the volume by +4 dB does not cause any clipping,
-raising it by +5 dB causes clipping for 6 samples, etc.
-
-
-
-
28 Audio Sources# TOC
-
-
Below is a description of the currently available audio sources.
-
-
-
28.1 abuffer# TOC
-
-
Buffer audio frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/asrc_abuffer.h .
-
-
It accepts the following parameters:
-
-time_base
-The timebase which will be used for timestamps of submitted frames. It must be
-either a floating-point number or in numerator /denominator form.
-
-
-sample_rate
-The sample rate of the incoming audio buffers.
-
-
-sample_fmt
-The sample format of the incoming audio buffers.
-Either a sample format name or its corresponding integer representation from
-the enum AVSampleFormat in libavutil/samplefmt.h
-
-
-channel_layout
-The channel layout of the incoming audio buffers.
-Either a channel layout name from channel_layout_map in
-libavutil/channel_layout.c or its corresponding integer representation
-from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
-
-
-channels
-The number of channels of the incoming audio buffers.
-If both channels and channel_layout are specified, then they
-must be consistent.
-
-
-
-
-
-
28.1.1 Examples# TOC
-
-
-
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
-
-
-
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
-Since the sample format with name "s16p" corresponds to the number
-6 and the "stereo" channel layout corresponds to the value 0x3, this is
-equivalent to:
-
-
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
-
-
-
-
28.2 aevalsrc# TOC
-
-
Generate an audio signal specified by an expression.
-
-
This source accepts in input one or more expressions (one for each
-channel), which are evaluated and used to generate a corresponding
-audio signal.
-
-
This source accepts the following options:
-
-
-exprs
-Set the ’|’-separated expressions list for each separate channel. In case the
-channel_layout option is not specified, the selected channel layout
-depends on the number of provided expressions. Otherwise the last
-specified expression is applied to the remaining output channels.
-
-
-channel_layout, c
-Set the channel layout. The number of channels in the specified layout
-must be equal to the number of specified expressions.
-
-
-duration, d
-Set the minimum duration of the sourced audio. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-Note that the resulting duration may be greater than the specified
-duration, as the generated audio is always cut at the end of a
-complete frame.
-
-If not specified, or the expressed duration is negative, the audio is
-supposed to be generated forever.
-
-
-nb_samples, n
-Set the number of samples per channel per each output frame,
-default to 1024.
-
-
-sample_rate, s
-Specify the sample rate, default to 44100.
-
-
-
-
Each expression in exprs can contain the following constants:
-
-
-n
-number of the evaluated sample, starting from 0
-
-
-t
-time of the evaluated sample expressed in seconds, starting from 0
-
-
-s
-sample rate
-
-
-
-
-
-
28.2.1 Examples# TOC
-
-
- Generate silence:
-
-
- Generate a sin signal with frequency of 440 Hz, set sample rate to
-8000 Hz:
-
-
aevalsrc="sin(440*2*PI*t):s=8000"
-
-
- Generate a two channels signal, specify the channel layout (Front
-Center + Back Center) explicitly:
-
-
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
-
-
- Generate white noise:
-
-
aevalsrc="-2+random(0)"
-
-
- Generate an amplitude modulated signal:
-
-
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
-
-
- Generate 2.5 Hz binaural beats on a 360 Hz carrier:
-
-
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
-
-
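-The "Generate silence" item above appears to have lost its command; a form
-matching that description is:
-
aevalsrc=0
-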
-
-
-
-
28.3 anullsrc# TOC
-
-
-The null audio source returns unprocessed audio frames. It is mainly useful
-as a template and to be employed in analysis / debugging tools, or as
-the source for filters which ignore the input data (for example the sox
-synth filter).
-
-
This source accepts the following options:
-
-
-channel_layout, cl
-
-Specifies the channel layout, and can be either an integer or a string
-representing a channel layout. The default value of channel_layout
-is "stereo".
-
-Check the channel_layout_map definition in
-libavutil/channel_layout.c for the mapping between strings and
-channel layout values.
-
-
-sample_rate, r
-Specifies the sample rate, and defaults to 44100.
-
-
-nb_samples, n
-Set the number of samples per requested frames.
-
-
-
-
-
-
28.3.1 Examples# TOC
-
-
- Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
-
-
- Do the same operation with a more obvious syntax:
-
-
anullsrc=r=48000:cl=mono
-
-
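-The first example item above appears to have lost its command; assuming the
-numeric value 4 for AV_CH_LAYOUT_MONO, a matching form is:
-
anullsrc=r=48000:cl=4
-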
-
-
All the parameters need to be explicitly defined.
-
-
-
28.4 flite# TOC
-
-
Synthesize a voice utterance using the libflite library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libflite.
-
-
Note that the flite library is not thread-safe.
-
-
The filter accepts the following options:
-
-
-list_voices
-If set to 1, list the names of the available voices and exit
-immediately. Default value is 0.
-
-
-nb_samples, n
-Set the maximum number of samples per frame. Default value is 512.
-
-
-textfile
-Set the filename containing the text to speak.
-
-
-text
-Set the text to speak.
-
-
-voice, v
-Set the voice to use for the speech synthesis. Default value is
-kal. See also the list_voices option.
-
-
-
-
-
28.4.1 Examples# TOC
-
-
- Read from file speech.txt , and synthesize the text using the
-standard flite voice:
-
-
flite=textfile=speech.txt
-
-
- Read the specified text selecting the slt voice:
-
-
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Input text to ffmpeg:
-
-
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
-
-
- Make ffplay speak the specified text, using flite and
-the lavfi device:
-
-
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
-
-
-
-
For more information about libflite, check:
-http://www.speech.cs.cmu.edu/flite/
-
-
-
28.5 sine# TOC
-
-
Generate an audio signal made of a sine wave with amplitude 1/8.
-
-
The audio signal is bit-exact.
-
-
The filter accepts the following options:
-
-
-frequency, f
-Set the carrier frequency. Default is 440 Hz.
-
-
-beep_factor, b
-Enable a periodic beep every second with frequency beep_factor times
-the carrier frequency. Default is 0, meaning the beep is disabled.
-
-
-sample_rate, r
-Specify the sample rate, default is 44100.
-
-
-duration, d
-Specify the duration of the generated audio stream.
-
-
-samples_per_frame
-Set the number of samples per output frame, default is 1024.
-
-
-
-
-
28.5.1 Examples# TOC
-
-
- Generate a simple 440 Hz sine wave:
-
-
- Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
-
-
sine=220:4:d=5
-sine=f=220:b=4:d=5
-sine=frequency=220:beep_factor=4:duration=5
-
-
-
-
-
-
-
29 Audio Sinks# TOC
-
-
Below is a description of the currently available audio sinks.
-
-
-
29.1 abuffersink# TOC
-
-
Buffer audio frames, and make them available to the end of filter chain.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVABufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter for initialization.
-
-
29.2 anullsink# TOC
-
-
Null audio sink; do absolutely nothing with the input audio. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
30 Video Filters# TOC
-
-
When you configure your FFmpeg build, you can disable any of the
-existing filters using --disable-filters.
-The configure output will show the video filters included in your
-build.
-
-
Below is a description of the currently available video filters.
-
-
-
30.1 alphaextract# TOC
-
-
Extract the alpha component from the input as a grayscale video. This
-is especially useful with the alphamerge filter.
-
-
-
30.2 alphamerge# TOC
-
-
Add or replace the alpha component of the primary input with the
-grayscale value of a second input. This is intended for use with
-alphaextract to allow the transmission or storage of frame
-sequences that have alpha in a format that doesn’t support an alpha
-channel.
-
-
For example, to reconstruct full frames from a normal YUV-encoded video
-and a separate video created with alphaextract , you might use:
-
-
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
-
-
-
Since this filter is designed for reconstruction, it operates on frame
-sequences without considering timestamps, and terminates when either
-input reaches end of stream. This will cause problems if your encoding
-pipeline drops frames. If you’re trying to apply an image as an
-overlay to a video stream, consider the overlay filter instead.
-
-
-
30.3 ass# TOC
-
-
Same as the subtitles filter, except that it doesn’t require libavcodec
-and libavformat to work. On the other hand, it is limited to ASS (Advanced
-Substation Alpha) subtitles files.
-
-
This filter accepts the following option in addition to the common options from
-the subtitles filter:
-
-
-shaping
-Set the shaping engine
-
-Available values are:
-
-‘auto ’
-The default libass shaping engine, which is the best available.
-
-‘simple ’
-Fast, font-agnostic shaper that can do only substitutions
-
-‘complex ’
-Slower shaper using OpenType for substitutions and positioning
-
-
-
-The default is auto.
-
-
-
-
-
30.4 bbox# TOC
-
-
Compute the bounding box for the non-black pixels in the input frame
-luminance plane.
-
-
This filter computes the bounding box containing all the pixels with a
-luminance value greater than the minimum allowed value.
-The parameters describing the bounding box are printed on the filter
-log.
-
-
The filter accepts the following option:
-
-
-min_val
-Set the minimal luminance value. Default is 16.
-
-
-
-
-
30.5 blackdetect# TOC
-
-
Detect video intervals that are (almost) completely black. Can be
-useful to detect chapter transitions, commercials, or invalid
-recordings. Output lines contain the time for the start, end and
-duration of the detected black interval expressed in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
The filter accepts the following options:
-
-
-black_min_duration, d
-Set the minimum detected black duration expressed in seconds. It must
-be a non-negative floating point number.
-
-Default value is 2.0.
-
-
-picture_black_ratio_th, pic_th
-Set the threshold for considering a picture "black".
-Express the minimum value for the ratio:
-
-
nb_black_pixels / nb_pixels
-
-
-for which a picture is considered black.
-Default value is 0.98.
-
-
-pixel_black_th, pix_th
-Set the threshold for considering a pixel "black".
-
-The threshold expresses the maximum pixel luminance value for which a
-pixel is considered "black". The provided value is scaled according to
-the following equation:
-
-
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
-
-
-luminance_range_size and luminance_minimum_value depend on
-the input video format, the range is [0-255] for YUV full-range
-formats and [16-235] for YUV non full-range formats.
-
-Default value is 0.10.
-
-
-
-
The following example sets the maximum pixel threshold to the minimum
-value, and detects only black intervals of 2 or more seconds:
-
-
blackdetect=d=2:pix_th=0.00
-
-
-
-
30.6 blackframe# TOC
-
-
Detect frames that are (almost) completely black. Can be useful to
-detect chapter transitions or commercials. Output lines consist of
-the frame number of the detected frame, the percentage of blackness,
-the position in the file if known or -1 and the timestamp in seconds.
-
-
In order to display the output lines, you need to set the loglevel at
-least to the AV_LOG_INFO value.
-
-
It accepts the following parameters:
-
-
-amount
-The percentage of the pixels that have to be below the threshold; it defaults to
-98.
-
-
-threshold, thresh
-The threshold below which a pixel value is considered black; it defaults to
-32.
-
-
-
-
-
-
30.7 blend, tblend# TOC
-
-
Blend two video frames into each other.
-
-
The blend
filter takes two input streams and outputs one
-stream, the first input is the "top" layer and second input is
-"bottom" layer. Output terminates when shortest input terminates.
-
-
The tblend
(time blend) filter takes two consecutive frames
-from one single stream, and outputs the result obtained by blending
-the new frame on top of the old frame.
-
-
A description of the accepted options follows.
-
-
-c0_mode
-c1_mode
-c2_mode
-c3_mode
-all_mode
-Set blend mode for specific pixel component or all pixel components in case
-of all_mode. Default value is normal.
-
-Available values for component modes are:
-
-‘addition ’
-‘and ’
-‘average ’
-‘burn ’
-‘darken ’
-‘difference ’
-‘difference128 ’
-‘divide ’
-‘dodge ’
-‘exclusion ’
-‘hardlight ’
-‘lighten ’
-‘multiply ’
-‘negation ’
-‘normal ’
-‘or ’
-‘overlay ’
-‘phoenix ’
-‘pinlight ’
-‘reflect ’
-‘screen ’
-‘softlight ’
-‘subtract ’
-‘vividlight ’
-‘xor ’
-
-
-
-c0_opacity
-c1_opacity
-c2_opacity
-c3_opacity
-all_opacity
-Set blend opacity for specific pixel component or all pixel components in case
-of all_opacity . Only used in combination with pixel component blend modes.
-
-
-c0_expr
-c1_expr
-c2_expr
-c3_expr
-all_expr
-Set blend expression for specific pixel component or all pixel components in case
-of all_expr . Note that related mode options will be ignored if those are set.
-
-The expressions can use the following variables:
-
-
-N
-The sequential number of the filtered frame, starting from 0.
-
-
-X
-Y
-the coordinates of the current sample
-
-
-W
-H
-the width and height of currently filtered plane
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-TOP, A
-Value of pixel component at current location for first video frame (top layer).
-
-
-BOTTOM, B
-Value of pixel component at current location for second video frame (bottom layer).
-
-
-
-
-shortest
-Force termination when the shortest input terminates. Default is
-0. This option is only defined for the blend filter.
-
-
-repeatlast
-Continue applying the last bottom frame after the end of the stream. A value of
-0 disables the filter after the last frame of the bottom layer is reached.
-Default is 1. This option is only defined for the blend filter.
-
-
-
-
-
30.7.1 Examples# TOC
-
-
- Apply transition from bottom layer to top layer in first 10 seconds:
-
-
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
-
-
- Apply 1x1 checkerboard effect:
-
-
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
-
-
- Apply uncover left effect:
-
-
blend=all_expr='if(gte(N*SW+X,W),A,B)'
-
-
- Apply uncover down effect:
-
-
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
-
-
- Apply uncover up-left effect:
-
-
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
-
-
- Display differences between the current and the previous frame:
-
-
tblend=all_mode=difference128
-
-
-
-
-
30.8 boxblur# TOC
-
-
Apply a boxblur algorithm to the input video.
-
-
It accepts the following parameters:
-
-
-luma_radius, lr
-luma_power, lp
-chroma_radius, cr
-chroma_power, cp
-alpha_radius, ar
-alpha_power, ap
-
-
-
A description of the accepted options follows.
-
-
-luma_radius, lr
-chroma_radius, cr
-alpha_radius, ar
-Set an expression for the box radius in pixels used for blurring the
-corresponding input plane.
-
-The radius value must be a non-negative number, and must not be
-greater than the value of the expression min(w,h)/2 for the
-luma and alpha planes, and of min(cw,ch)/2 for the chroma
-planes.
-
-Default value for luma_radius is "2". If not specified,
-chroma_radius and alpha_radius default to the
-corresponding value set for luma_radius .
-
-The expressions can contain the following constants:
-
-w
-h
-The input width and height in pixels.
-
-
-cw
-ch
-The input chroma image width and height in pixels.
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p", hsub is 2 and vsub is 1.
-
-
-
-
-luma_power, lp
-chroma_power, cp
-alpha_power, ap
-Specify how many times the boxblur filter is applied to the
-corresponding plane.
-
-Default value for luma_power is 2. If not specified,
-chroma_power and alpha_power default to the
-corresponding value set for luma_power .
-
-A value of 0 will disable the effect.
-
-
-
-
-
30.8.1 Examples# TOC
-
-
- Apply a boxblur filter with the luma, chroma, and alpha radii
-set to 2:
-
-
boxblur=luma_radius=2:luma_power=1
-boxblur=2:1
-
-
- Set the luma radius to 2, and alpha and chroma radius to 0:
-
-boxblur=2:1:cr=0:ar=0
-
- Set the luma and chroma radii to a fraction of the video dimension:
-
-
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
-
-
-
-
-
30.9 codecview# TOC
-
-
Visualize information exported by some codecs.
-
-
Some codecs can export information through frames using side-data or other
-means. For example, some MPEG based codecs export motion vectors through the
-export_mvs flag in the codec flags2 option.
-
-
The filter accepts the following option:
-
-
-mv
-Set motion vectors to visualize.
-
-Available flags for mv are:
-
-
-‘pf ’
-forward predicted MVs of P-frames
-
-‘bf ’
-forward predicted MVs of B-frames
-
-‘bb ’
-backward predicted MVs of B-frames
-
-
-
-
-
-
-
30.9.1 Examples# TOC
-
-
- Visualizes multi-directionals MVs from P and B-Frames using ffplay
:
-
-
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
-
-
-
-
-
30.10 colorbalance# TOC
-
Modify intensity of primary colors (red, green and blue) of input frames.
-
-
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
-regions for the red-cyan, green-magenta or blue-yellow balance.
-
-
A positive adjustment value shifts the balance towards the primary color, a negative
-value towards the complementary color.
-
-
The filter accepts the following options:
-
-
-rs
-gs
-bs
-Adjust red, green and blue shadows (darkest pixels).
-
-
-rm
-gm
-bm
-Adjust red, green and blue midtones (medium pixels).
-
-
-rh
-gh
-bh
-Adjust red, green and blue highlights (brightest pixels).
-
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-
-
-
30.10.1 Examples# TOC
-
-
- Add red color cast to shadows:
-
-colorbalance=rs=.3
-
-
-
30.11 colorlevels# TOC
-
-
Adjust video input frames using levels.
-
-
The filter accepts the following options:
-
-
-rimin
-gimin
-bimin
-aimin
-Adjust red, green, blue and alpha input black point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
-
-
-rimax
-gimax
-bimax
-aimax
-Adjust red, green, blue and alpha input white point.
-Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
-
-Input levels are used to lighten highlights (bright tones), darken shadows
-(dark tones), change the balance of bright and dark tones.
-
-
-romin
-gomin
-bomin
-aomin
-Adjust red, green, blue and alpha output black point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
-
-
-romax
-gomax
-bomax
-aomax
-Adjust red, green, blue and alpha output white point.
-Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
-
-Output levels allow manual selection of a constrained output level range.
-
-
-
-
-
30.11.1 Examples# TOC
-
-
- Make video output darker:
-
-
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
-
-
- Increase contrast:
-
-
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
-
-
- Make video output lighter:
-
-
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
-
-
- Increase brightness:
-
-
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
-
-
-
-
-
30.12 colorchannelmixer# TOC
-
-
Adjust video input frames by re-mixing color channels.
-
-
This filter modifies a color channel by adding the values associated to
-the other channels of the same pixels. For example if the value to
-modify is red, the output value will be:
-
-
-red = red*rr + blue*rb + green*rg + alpha*ra
-
-
-
The filter accepts the following options:
-
-
-rr
-rg
-rb
-ra
-Adjust contribution of input red, green, blue and alpha channels for output red channel.
-Default is 1
for rr , and 0
for rg , rb and ra .
-
-
-gr
-gg
-gb
-ga
-Adjust contribution of input red, green, blue and alpha channels for output green channel.
-Default is 1
for gg , and 0
for gr , gb and ga .
-
-
-br
-bg
-bb
-ba
-Adjust contribution of input red, green, blue and alpha channels for output blue channel.
-Default is 1
for bb , and 0
for br , bg and ba .
-
-
-ar
-ag
-ab
-aa
-Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
-Default is 1
for aa , and 0
for ar , ag and ab .
-
-Allowed ranges for options are [-2.0, 2.0]
.
-
-
-
-
-
30.12.1 Examples# TOC
-
-
- Convert source to grayscale:
-
-
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
-
- Simulate sepia tones:
-
-
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
-
-
-
-
-
30.13 colormatrix# TOC
-
-
Convert color matrix.
-
-
The filter accepts the following options:
-
-
-src
-dst
-Specify the source and destination color matrix. Both values must be
-specified.
-
-The accepted values are:
-
-‘bt709 ’
-BT.709
-
-
-‘bt601 ’
-BT.601
-
-
-‘smpte240m ’
-SMPTE-240M
-
-
-‘fcc ’
-FCC
-
-
-
-
-
-
For example to convert from BT.601 to SMPTE-240M, use the command:
-
-
colormatrix=bt601:smpte240m
-
-
-
-
30.14 copy# TOC
-
-
Copy the input source unchanged to the output. This is mainly useful for
-testing purposes.
-
-
-
30.15 crop# TOC
-
-
Crop the input video to given dimensions.
-
-
It accepts the following parameters:
-
-
-w, out_w
-The width of the output video. It defaults to iw
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-h, out_h
-The height of the output video. It defaults to ih
.
-This expression is evaluated only once during the filter
-configuration.
-
-
-x
-The horizontal position, in the input video, of the left edge of the output
-video. It defaults to (in_w-out_w)/2
.
-This expression is evaluated per-frame.
-
-
-y
-The vertical position, in the input video, of the top edge of the output video.
-It defaults to (in_h-out_h)/2
.
-This expression is evaluated per-frame.
-
-
-keep_aspect
-If set to 1, the output display aspect ratio will be forced to be the same
-as that of the input, by changing the output sample aspect
-ratio. It defaults to 0.
-
-
-
-
The out_w , out_h , x , y parameters are
-expressions containing the following constants:
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-in_w
-in_h
-The input width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (cropped) width and height.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-n
-The number of the input frame, starting from 0.
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
The expression for out_w may depend on the value of out_h ,
-and the expression for out_h may depend on out_w , but they
-cannot depend on x and y , as x and y are
-evaluated after out_w and out_h .
-
-
The x and y parameters specify the expressions for the
-position of the top-left corner of the output (non-cropped) area. They
-are evaluated for each frame. If the evaluated value is not valid, it
-is approximated to the nearest valid value.
-
-
The expression for x may depend on y , and the expression
-for y may depend on x .
-
-
-
30.15.1 Examples# TOC
-
-
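- For instance, to crop a centered 100x100 area (relying on the default
-centering expressions for x and y described above), a command of the
-following form can be used:
-
-crop=100:100
-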
-
-
-
30.16 cropdetect# TOC
-
-
Auto-detect the crop size.
-
-
It calculates the necessary cropping parameters and prints the
-recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-
It accepts the following parameters:
-
-
-limit
-Set the higher black value threshold, which can be optionally specified
-from nothing (0) to everything (255 for 8-bit based formats). An intensity
-value greater than the set value is considered non-black. It defaults to 24.
-You can also specify a value between 0.0 and 1.0 which will be scaled depending
-on the bitdepth of the pixel format.
-
-
-round
-The value which the width/height should be divisible by. It defaults to
-16. The offset is automatically adjusted to center the video. Use 2 to
-get only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs.
-
-
-reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates ’never reset’, and returns the largest area encountered during
-playback.
-
-
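- For instance, the suggested crop parameters can be printed without writing an
-output file by discarding the result with the null muxer; an invocation along
-these lines should work:
-
-ffmpeg -i input.mkv -vf cropdetect=limit=24:round=16 -f null -
-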
-
-
-
30.17 curves# TOC
-
-
Apply color adjustments using curves.
-
-
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by N key points
-tied from each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-
By default, a component curve is defined by the two points (0;0) and
-(1;1) . This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-
The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates. The newly defined points need to be
-strictly increasing over the x-axis, and their x and y values must
-be in the [0;1] interval. If the computed curves happen to go outside
-the vector spaces, the values will be clipped accordingly.
-
-
If there is no key point defined in x=0
, the filter will automatically
-insert a (0;0) point. In the same way, if there is no key point defined
-in x=1
, the filter will automatically insert a (1;1) point.
-
-
The filter accepts the following options:
-
-
-preset
-Select one of the available color presets. This option can be used in addition
-to the r , g , b parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-
-‘none ’
-‘color_negative ’
-‘cross_process ’
-‘darker ’
-‘increase_contrast ’
-‘lighter ’
-‘linear_contrast ’
-‘medium_contrast ’
-‘negative ’
-‘strong_contrast ’
-‘vintage ’
-
-Default is none
.
-
-master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-r , g , b or all since it acts like a
-post-processing LUT.
-
-red, r
-Set the key points for the red component.
-
-green, g
-Set the key points for the green component.
-
-blue, b
-Set the key points for the blue component.
-
-all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fallback on this
-all setting.
-
-psfile
-Specify a Photoshop curves file (.asv
) to import the settings from.
-
-
-
-
-To avoid some filtergraph syntax conflicts, each key point list needs to be
-defined using the following syntax: x0/y0 x1/y1 x2/y2 ... .
-
-
-
30.17.1 Examples# TOC
-
-
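- As a minimal sketch of the syntax described above, one of the built-in
-presets can be applied directly:
-
-curves=preset=vintage
-
- and a custom red-component curve can be given as a key point list:
-
-curves=r='0/0.11 .42/.51 1/0.95'
-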
-
-
-
30.18 dctdnoiz# TOC
-
-
Denoise frames using 2D DCT (frequency domain filtering).
-
-
This filter is not designed for real time.
-
-
The filter accepts the following options:
-
-
-sigma, s
-Set the noise sigma constant.
-
-This sigma defines a hard threshold of 3 * sigma ; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need a more advanced filtering, see expr .
-
-Default is 0
.
-
-
-overlap
-Set the number of overlapping pixels for each block. Since the filter can be slow, you
-may want to reduce this value, at the cost of a less effective filter and the
-risk of various artefacts.
-
-If the overlapping value doesn’t allow processing the whole input width or
-height, a warning will be displayed and the corresponding borders won’t be denoised.
-
-Default value is blocksize -1, which is the best possible setting.
-
-
-expr, e
-Set the coefficient factor expression.
-
-For each coefficient of a DCT block, this expression will be evaluated as a
-multiplier value for the coefficient.
-
-If this option is set, the sigma option will be ignored.
-
-The absolute value of the coefficient can be accessed through the c
-variable.
-
-
-n
-Set the blocksize using the number of bits. 1<<n
defines the
-blocksize , which is the width and height of the processed blocks.
-
-The default value is 3 (8x8) and can be raised to 4 for a
-blocksize of 16x16. Note that changing this setting has huge consequences
-on processing speed. Also, a larger block size does not necessarily mean
-better de-noising.
-
-
-
-
-
30.18.1 Examples# TOC
-
-
Apply a denoise with a sigma of 4.5 :
-
-dctdnoiz=4.5
-
-
The same operation can be achieved using the expression system:
-
-
dctdnoiz=e='gte(c, 4.5*3)'
-
-
-
Violent denoise using a block size of 16x16 :
-
-dctdnoiz=15:n=4
-
-
30.19 decimate# TOC
-
-
Drop duplicated frames at regular intervals.
-
-
The filter accepts the following options:
-
-
-cycle
-Set the number of frames from which one will be dropped. Setting this to
-N means one frame in every batch of N frames will be dropped.
-Default is 5
.
-
-
-dupthresh
-Set the threshold for duplicate detection. If the difference metric for a frame
-is less than or equal to this value, then it is declared as duplicate. Default
-is 1.1
-
-
-scthresh
-Set scene change threshold. Default is 15
.
-
-
-blockx
-blocky
-Set the size of the x and y-axis blocks used during metric calculations.
-Larger blocks give better noise suppression, but also give worse detection of
-small movements. Must be a power of two. Default is 32
.
-
-
-ppsrc
-Mark main input as a pre-processed input and activate clean source input
-stream. This allows the input to be pre-processed with various filters to help
-the metrics calculation while keeping the frame selection lossless. When set to
-1
, the first stream is for the pre-processed input, and the second
-stream is the clean source from where the kept frames are chosen. Default is
-0
.
-
-
-chroma
-Set whether or not chroma is considered in the metric calculations. Default is
-1
.
-
-
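- For instance, dropping one frame out of every five while keeping the default
-detection thresholds can be requested with:
-
-decimate=cycle=5
-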
-
-
-
30.20 dejudder# TOC
-
-
Remove judder produced by partially interlaced telecined content.
-
-
-Judder can be introduced, for instance, by the pullup filter. If the original
-source was partially telecined content then the output of pullup,dejudder
-will have a variable frame rate. This may change the recorded frame rate of the
-container. Aside from that change, this filter will not affect constant frame
-rate video.
-
-
The option available in this filter is:
-
-cycle
-Specify the length of the window over which the judder repeats.
-
-Accepts any integer greater than 1. Useful values are:
-
-‘4 ’
-If the original was telecined from 24 to 30 fps (Film to NTSC).
-
-
-‘5 ’
-If the original was telecined from 25 to 30 fps (PAL to NTSC).
-
-
-‘20 ’
-If a mixture of the two.
-
-
-
-The default is ‘4 ’.
-
-
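- A minimal sketch of the chain mentioned above, for content telecined from
-film (window length 4), could be:
-
-pullup,dejudder=cycle=4
-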
-
-
-
30.21 delogo# TOC
-
-
Suppress a TV station logo by a simple interpolation of the surrounding
-pixels. Just set a rectangle covering the logo and watch it disappear
-(and sometimes something even uglier appear - your mileage may vary).
-
-
It accepts the following parameters:
-
-x
-y
-Specify the top left corner coordinates of the logo. They must be
-specified.
-
-
-w
-h
-Specify the width and height of the logo to clear. They must be
-specified.
-
-
-band, t
-Specify the thickness of the fuzzy edge of the rectangle (added to
-w and h ). The default value is 4.
-
-
-show
-When set to 1, a green rectangle is drawn on the screen to simplify
-finding the right x , y , w , and h parameters.
-The default value is 0.
-
-The rectangle is drawn on the outermost pixels which will be (partly)
-replaced with interpolated values. The values of the next pixels
-immediately outside this rectangle in each direction will be used to
-compute the interpolated pixel values inside the rectangle.
-
-
-
-
-
-
30.21.1 Examples# TOC
-
-
- Set a rectangle covering the area with top left corner coordinates 0,0
-and size 100x77, and a band of size 10:
-
-
delogo=x=0:y=0:w=100:h=77:band=10
-
-
-
-
-
-
30.22 deshake# TOC
-
-
Attempt to fix small changes in horizontal and/or vertical shift. This
-filter helps remove camera shake from hand-holding a camera, bumping a
-tripod, moving on a vehicle, etc.
-
-
The filter accepts the following options:
-
-
-x
-y
-w
-h
-Specify a rectangular area where to limit the search for motion
-vectors.
-If desired the search for motion vectors can be limited to a
-rectangular area of the frame defined by its top left corner, width
-and height. These parameters have the same meaning as the drawbox
-filter which can be used to visualise the position of the bounding
-box.
-
-This is useful when simultaneous movement of subjects within the frame
-might be confused for camera motion by the motion vector search.
-
-If any or all of x , y , w and h are set to -1
-then the full frame is used. This allows later options to be set
-without specifying the bounding box for the motion vector search.
-
-Default - search the whole frame.
-
-
-rx
-ry
-Specify the maximum extent of movement in x and y directions in the
-range 0-64 pixels. Default 16.
-
-
-edge
-Specify how to generate pixels to fill blanks at the edge of the
-frame. Available values are:
-
-‘blank, 0 ’
-Fill zeroes at blank locations
-
-‘original, 1 ’
-Original image at blank locations
-
-‘clamp, 2 ’
-Extruded edge value at blank locations
-
-‘mirror, 3 ’
-Mirrored edge at blank locations
-
-
-Default value is ‘mirror ’.
-
-
-blocksize
-Specify the blocksize to use for motion search. Range 4-128 pixels,
-default 8.
-
-
-contrast
-Specify the contrast threshold for blocks. Only blocks with more than
-the specified contrast (difference between darkest and lightest
-pixels) will be considered. Range 1-255, default 125.
-
-
-search
-Specify the search strategy. Available values are:
-
-‘exhaustive, 0 ’
-Set exhaustive search
-
-‘less, 1 ’
-Set less exhaustive search.
-
-
-Default value is ‘exhaustive ’.
-
-
-filename
-If set then a detailed log of the motion search is written to the
-specified file.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
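- For instance, to allow a larger motion search extent of 32 pixels in each
-direction while keeping the other defaults, an option string along these lines
-can be used:
-
-deshake=rx=32:ry=32
-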
-
-
-
30.23 drawbox# TOC
-
-
Draw a colored box on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the top left corner coordinates of the box. It defaults to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the box; if 0 they are interpreted as
-the input width and height. It defaults to 0.
-
-
-color, c
-Specify the color of the box to write. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the box edge color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the box edge. Default value is 3
.
-
-See below for the list of accepted constants.
-
-
-
-
-The parameters for x , y , w , h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y offset coordinates where the box is drawn.
-
-
-w
-h
-The width and height of the drawn box.
-
-
-t
-The thickness of the drawn box.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
30.23.1 Examples# TOC
-
-
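- For instance, a semi-transparent red box of size 200x60 with its top left
-corner at (10,20), using the default edge thickness, can be drawn with:
-
-drawbox=x=10:y=20:w=200:h=60:color=red@0.5
-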
-
-
-
30.24 drawgrid# TOC
-
-
Draw a grid on the input image.
-
-
It accepts the following parameters:
-
-
-x
-y
-The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
-
-
-width, w
-height, h
-The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
-input width and height, respectively, minus thickness , so the image gets
-framed. Defaults to 0.
-
-
-color, c
-Specify the color of the grid. For the general syntax of this option,
-check the "Color" section in the ffmpeg-utils manual. If the special
-value invert
is used, the grid color is the same as the
-video with inverted luma.
-
-
-thickness, t
-The expression which sets the thickness of the grid line. Default value is 1
.
-
-See below for the list of accepted constants.
-
-
-
-
-The parameters for x , y , w , h and t are expressions containing the
-following constants:
-
-
-dar
-The input display aspect ratio, it is the same as (w / h ) * sar .
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_h, ih
-in_w, iw
-The input grid cell width and height.
-
-
-sar
-The input sample aspect ratio.
-
-
-x
-y
-The x and y coordinates of some point of grid intersection (meant to configure offset).
-
-
-w
-h
-The width and height of the drawn cell.
-
-
-t
-The thickness of the drawn cell.
-
-These constants allow the x , y , w , h and t expressions to refer to
-each other, so you may for example specify y=x/dar
or h=w/dar
.
-
-
-
-
-
-
30.24.1 Examples# TOC
-
-
- Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
-
-
drawgrid=width=100:height=100:thickness=2:color=red@0.5
-
-
- Draw a white 3x3 grid with an opacity of 50%:
-
-
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
-
-
-
-
-
30.25 drawtext# TOC
-
-
Draw a text string or text from a specified file on top of a video, using the
-libfreetype library.
-
-
To enable compilation of this filter, you need to configure FFmpeg with
---enable-libfreetype
.
-To enable default font fallback and the font option you need to
-configure FFmpeg with --enable-libfontconfig
.
-To enable the text_shaping option, you need to configure FFmpeg with
---enable-libfribidi
.
-
-
-
30.25.1 Syntax# TOC
-
-
It accepts the following parameters:
-
-
-box
-Used to draw a box around text using the background color.
-The value must be either 1 (enable) or 0 (disable).
-The default value of box is 0.
-
-
-boxcolor
-The color to be used for drawing box around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of boxcolor is "white".
-
-
-borderw
-Set the width of the border to be drawn around the text using bordercolor .
-The default value of borderw is 0.
-
-
-bordercolor
-Set the color to be used for drawing border around text. For the syntax of this
-option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of bordercolor is "black".
-
-
-expansion
-Select how the text is expanded. Can be either none
,
-strftime
(deprecated) or
-normal
(default). See the Text expansion section
-below for details.
-
-
-fix_bounds
-If true, check and fix text coords to avoid clipping.
-
-
-fontcolor
-The color to be used for drawing fonts. For the syntax of this option, check
-the "Color" section in the ffmpeg-utils manual.
-
-The default value of fontcolor is "black".
-
-
-fontcolor_expr
-String which is expanded the same way as text to obtain dynamic
-fontcolor value. By default this option has empty value and is not
-processed. When this option is set, it overrides fontcolor option.
-
-
-font
-The font family to be used for drawing text. By default Sans.
-
-
-fontfile
-The font file to be used for drawing text. The path must be included.
-This parameter is mandatory if the fontconfig support is disabled.
-
-
-fontsize
-The font size to be used for drawing text.
-The default value of fontsize is 16.
-
-
-text_shaping
-If set to 1, attempt to shape the text (for example, reverse the order of
-right-to-left text and join Arabic characters) before drawing it.
-Otherwise, just draw the text exactly as given.
-By default 1 (if supported).
-
-
-ft_load_flags
-The flags to be used for loading the fonts.
-
-The flags map the corresponding flags supported by libfreetype, and are
-a combination of the following values:
-
-default
-no_scale
-no_hinting
-render
-no_bitmap
-vertical_layout
-force_autohint
-crop_bitmap
-pedantic
-ignore_global_advance_width
-no_recurse
-ignore_transform
-monochrome
-linear_design
-no_autohint
-
-
-Default value is "default".
-
-For more information consult the documentation for the FT_LOAD_*
-libfreetype flags.
-
-
-shadowcolor
-The color to be used for drawing a shadow behind the drawn text. For the
-syntax of this option, check the "Color" section in the ffmpeg-utils manual.
-
-The default value of shadowcolor is "black".
-
-
-shadowx
-shadowy
-The x and y offsets for the text shadow position with respect to the
-position of the text. They can be either positive or negative
-values. The default value for both is "0".
-
-
-start_number
-The starting frame number for the n/frame_num variable. The default value
-is "0".
-
-
-tabsize
-The size in number of spaces to use for rendering the tab.
-Default value is 4.
-
-
-timecode
-Set the initial timecode representation in "hh:mm:ss[:;.]ff"
-format. It can be used with or without text parameter. timecode_rate
-option must be specified.
-
-
-timecode_rate, rate, r
-Set the timecode frame rate (timecode only).
-
-
-text
-The text string to be drawn. The text must be a sequence of UTF-8
-encoded characters.
-This parameter is mandatory if no file is specified with the parameter
-textfile .
-
-
-textfile
-A text file containing text to be drawn. The text must be a sequence
-of UTF-8 encoded characters.
-
-This parameter is mandatory if no text string is specified with the
-parameter text .
-
-If both text and textfile are specified, an error is thrown.
-
-
-reload
-If set to 1, the textfile will be reloaded before each frame.
-Be sure to update it atomically, or it may be read partially, or even fail.
-
-
-x
-y
-The expressions which specify the offsets where text will be drawn
-within the video frame. They are relative to the top/left border of the
-output image.
-
-The default value of x and y is "0".
-
-See below for the list of accepted constants and functions.
-
-
-
-
The parameters for x and y are expressions containing the
-following constants and functions:
-
-
-dar
-input display aspect ratio, it is the same as (w / h ) * sar
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-line_h, lh
-the height of each text line
-
-
-main_h, h, H
-the input height
-
-
-main_w, w, W
-the input width
-
-
-max_glyph_a, ascent
-the maximum distance from the baseline to the highest/upper grid
-coordinate used to place a glyph outline point, for all the rendered
-glyphs.
-It is a positive value, due to the grid’s orientation with the Y axis
-upwards.
-
-
-max_glyph_d, descent
-the maximum distance from the baseline to the lowest grid coordinate
-used to place a glyph outline point, for all the rendered glyphs.
-This is a negative value, due to the grid’s orientation, with the Y axis
-upwards.
-
-
-max_glyph_h
-maximum glyph height, that is the maximum height for all the glyphs
-contained in the rendered text, it is equivalent to ascent -
-descent .
-
-
-max_glyph_w
-maximum glyph width, that is the maximum width for all the glyphs
-contained in the rendered text
-
-
-n
-the number of the input frame, starting from 0
-
-
-rand(min, max)
-return a random number included between min and max
-
-
-sar
-The input sample aspect ratio.
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-text_h, th
-the height of the rendered text
-
-
-text_w, tw
-the width of the rendered text
-
-
-x
-y
-the x and y offset coordinates where the text is drawn.
-
-These parameters allow the x and y expressions to refer
-to each other, so you can for example specify y=x/dar .
-
-
-
-
-
30.25.2 Text expansion# TOC
-
-
If expansion is set to strftime
,
-the filter recognizes strftime() sequences in the provided text and
-expands them accordingly. Check the documentation of strftime(). This
-feature is deprecated.
-
-
If expansion is set to none
, the text is printed verbatim.
-
-
If expansion is set to normal
(which is the default),
-the following expansion mechanism is used.
-
-
The backslash character ’\’, followed by any character, always expands to
-the second character.
-
-
-Sequences of the form %{...} are expanded. The text between the
-braces is a function name, possibly followed by arguments separated by ’:’.
-If the arguments contain special characters or delimiters (’:’ or ’}’),
-they should be escaped.
-
-
Note that they probably must also be escaped as the value for the
-text option in the filter argument string and as the filter
-argument in the filtergraph description, and possibly also for the shell,
-that makes up to four levels of escaping; using a text file avoids these
-problems.
-
-
The following functions are available:
-
-
-expr, e
-The expression evaluation result.
-
-It must take one argument specifying the expression to be evaluated,
-which accepts the same constants and functions as the x and
-y values. Note that not all constants should be used, for
-example the text size is not known when evaluating the expression, so
-the constants text_w and text_h will have an undefined
-value.
-
-
-expr_int_format, eif
-Evaluate the expression’s value and output as formatted integer.
-
-The first argument is the expression to be evaluated, just as for the expr function.
-The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
-’u’. They are treated exactly as in the printf function.
-The third parameter is optional and sets the number of positions taken by the output.
-It can be used to add padding with zeros from the left.
-
-
-gmtime
-The time at which the filter is running, expressed in UTC.
-It can accept an argument: a strftime() format string.
-
-
-localtime
-The time at which the filter is running, expressed in the local time zone.
-It can accept an argument: a strftime() format string.
-
-
-metadata
-Frame metadata. It must take one argument specifying metadata key.
-
-
-n, frame_num
-The frame number, starting from 0.
-
-
-pict_type
-A 1 character description of the current picture type.
-
-
-pts
-The timestamp of the current frame.
-It can take up to two arguments.
-
-The first argument is the format of the timestamp; it defaults to flt
-for seconds as a decimal number with microsecond accuracy; hms
stands
-for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
-
-The second argument is an offset added to the timestamp.
-
-
-
-
-
-
30.25.3 Examples# TOC
-
-
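- As a minimal sketch, assuming FFmpeg was built with libfontconfig so that the
-default font fallback is available, the current timestamp can be drawn in the
-top left corner with:
-
-drawtext=text='%{pts\:hms}':x=10:y=10:fontsize=24:fontcolor=white
-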
-
-
For more information about libfreetype, check:
-http://www.freetype.org/ .
-
-
For more information about fontconfig, check:
-http://freedesktop.org/software/fontconfig/fontconfig-user.html .
-
-
For more information about libfribidi, check:
-http://fribidi.org/ .
-
-
-
30.26 edgedetect# TOC
-
-
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
-
-
The filter accepts the following options:
-
-
-low
-high
-Set low and high threshold values used by the Canny thresholding
-algorithm.
-
-The high threshold selects the "strong" edge pixels, which are then
-connected through 8-connectivity with the "weak" edge pixels selected
-by the low threshold.
-
-low and high threshold values must be chosen in the range
-[0,1], and low should be less than or equal to high .
-
-Default value for low is 20/255
, and default value for high
-is 50/255
.
-
-
-mode
-Define the drawing mode.
-
-
-‘wires ’
-Draw white/gray wires on black background.
-
-
-‘colormix ’
-Mix the colors to create a paint/cartoon effect.
-
-
-
-Default value is wires .
-
-
-
-
-
30.26.1 Examples# TOC
-
-
- Standard edge detection with custom values for the hysteresis thresholding:
-
-
edgedetect=low=0.1:high=0.4
-
-
- Painting effect without thresholding:
-
-
edgedetect=mode=colormix:high=0
-
-
-
-
-
30.27 extractplanes# TOC
-
-
Extract color channel components from input video stream into
-separate grayscale video streams.
-
-
The filter accepts the following option:
-
-
-planes
-Set plane(s) to extract.
-
-Available values for planes are:
-
-‘y ’
-‘u ’
-‘v ’
-‘a ’
-‘r ’
-‘g ’
-‘b ’
-
-
-Choosing planes not available in the input will result in an error.
-That means you cannot select r , g , b planes
-with y , u , v planes at the same time.
-
-
-
-
-
30.27.1 Examples# TOC
-
-
- Extract luma, u and v color channel components from the input video frame
-into 3 grayscale outputs:
-
-
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
-
-
-
-
-
30.28 elbg# TOC
-
-
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
-
-
For each input image, the filter will compute the optimal mapping from
-the input to the output given the codebook length, that is the number
-of distinct output colors.
-
-
This filter accepts the following options.
-
-
-codebook_length, l
-Set codebook length. The value must be a positive integer, and
-represents the number of distinct output colors. Default value is 256.
-
-
-nb_steps, n
-Set the maximum number of iterations to apply for computing the optimal
-mapping. The higher the value the better the result and the higher the
-computation time. Default value is 1.
-
-
-seed, s
-Set a random seed; it must be an integer between 0 and
-UINT32_MAX. If not specified, or if explicitly set to -1, the filter
-will try to use a good random seed on a best effort basis.
-
-
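- For instance, a strong posterize effect that reduces the output to 16
-distinct colors can be obtained with:
-
-elbg=codebook_length=16
-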
-
-
-
30.29 fade# TOC
-
-
Apply a fade-in/out effect to the input video.
-
-
It accepts the following parameters:
-
-
-type, t
-The effect type can be either "in" for a fade-in, or "out" for a fade-out
-effect.
-Default is in
.
-
-
-start_frame, s
-Specify the number of the frame to start applying the fade
-effect at. Default is 0.
-
-
-nb_frames, n
-The number of frames that the fade effect lasts. At the end of the
-fade-in effect, the output video will have the same intensity as the input video.
-At the end of the fade-out transition, the output video will be filled with the
-selected color .
-Default is 25.
-
-
-alpha
-If set to 1, fade only alpha channel, if one exists on the input.
-Default value is 0.
-
-
-start_time, st
-Specify the timestamp (in seconds) of the frame to start to apply the fade
-effect. If both start_frame and start_time are specified, the fade will start at
-whichever comes last. Default is 0.
-
-
-duration, d
-The number of seconds for which the fade effect has to last. At the end of the
-fade-in effect the output video will have the same intensity as the input video,
-at the end of the fade-out transition the output video will be filled with the
-selected color .
-If both duration and nb_frames are specified, duration is used. Default is 0.
-
-
-color, c
-Specify the color of the fade. Default is "black".
-
-
-
-
-
30.29.1 Examples# TOC
-
-
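- For instance, fading in over the first 30 frames can be written with the
-shorthand positional syntax:
-
-fade=in:0:30
-
- while a 5 second fade out starting at 25 seconds can use the named options:
-
-fade=type=out:start_time=25:duration=5
-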
-
-
-
30.30 field# TOC
-
-
Extract a single field from an interlaced image using stride
-arithmetic to avoid wasting CPU time. The output frames are marked as
-non-interlaced.
-
-
The filter accepts the following options:
-
-
-type
-Specify whether to extract the top (if the value is 0
or
-top
) or the bottom field (if the value is 1
or
-bottom
).
-
-
-
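- For instance, keeping only the top field of each frame can be expressed as:
-
-field=type=top
-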
-
-
30.31 fieldmatch# TOC
-
-
Field matching filter for inverse telecine. It is meant to reconstruct the
-progressive frames from a telecined stream. The filter does not drop duplicated
-frames, so to achieve a complete inverse telecine fieldmatch
needs to be
-followed by a decimation filter such as decimate in the filtergraph.
-
-
The separation of the field matching and the decimation is notably motivated by
-the possibility of inserting a de-interlacing filter fallback between the two.
-If the source has mixed telecined and real interlaced content,
-fieldmatch
will not be able to match fields for the interlaced parts.
-But these remaining combed frames will be marked as interlaced, and thus can be
-de-interlaced by a later filter such as yadif before decimation.
-
-
In addition to the various configuration options, fieldmatch
can take an
-optional second stream, activated through the ppsrc option. If
-enabled, the frames reconstruction will be based on the fields and frames from
-this second stream. This allows the first input to be pre-processed in order to
-help the various algorithms of the filter, while keeping the output lossless
-(assuming the fields are matched properly). Typically, a field-aware denoiser,
-or brightness/contrast adjustments can help.
-
-
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on
-which fieldmatch is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-
The decimate filter currently only works for constant frame rate input.
-Do not use fieldmatch
and decimate if your input has mixed
-telecined and progressive content with changing framerate.
-
-
The filter accepts the following options:
-
-
-order
-Specify the assumed field order of the input stream. Available values are:
-
-
-‘auto ’
-Auto detect parity (use FFmpeg’s internal parity value).
-
-‘bff ’
-Assume bottom field first.
-
-‘tff ’
-Assume top field first.
-
-
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is auto .
-
-
-mode
-Set the matching mode or strategy to use. pc mode is the safest in the
-sense that it won’t risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, pcn_ub mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between pc and pcn_ub in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
-
-Available values are:
-
-
-‘pc ’
-2-way matching (p/c)
-
-‘pc_n ’
-2-way matching, and trying 3rd match if still combed (p/c + n)
-
-‘pc_u ’
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-
-‘pc_n_ub ’
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-
-‘pcn ’
-3-way matching (p/c/n)
-
-‘pcn_ub ’
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-
-
-
-The parentheses at the end indicate the matches that would be used for that
-mode assuming order =tff (and field on auto or
-top ).
-
-In terms of speed pc mode is by far the fastest and pcn_ub is
-the slowest.
-
-Default value is pc_n .
-
-
-ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the clip2 feature from
-VFM/TFM.
-
-Default value is 0
(disabled).
-
-
-field
-Set the field to match from. It is recommended to set this to the same value as
-order unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-
-‘auto ’
-Automatic (same value as order ).
-
-‘bottom ’
-Match from the bottom field.
-
-‘top ’
-Match from the top field.
-
-
-
-Default value is auto .
-
-
-mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to 0
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to 0
could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is 1
.
-
-
-y0
-y1
-These define an exclusion band which excludes the lines between y0 and
-y1 from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. y0 sets the starting scan line and
-y1 sets the ending line; all lines in between y0 and
-y1 (including y0 and y1 ) will be ignored. Setting
-y0 and y1 to the same value will disable the feature.
-y0 and y1 defaults to 0
.
-
-
-scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
-detection is only relevant in case combmatch =sc . The range for
-scthresh is [0.0, 100.0]
.
-
-Default value is 12.0
.
-
-
-combmatch
-When combmatch is not none , fieldmatch will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-
-‘none ’
-No final matching based on combed scores.
-
-‘sc ’
-Combed scores are only used when a scene change is detected.
-
-‘full ’
-Use combed scores all the time.
-
-
-
-Default is sc .
-
-
-combdbg
-Force fieldmatch
to calculate the combed metrics for certain matches and
-print them. This setting is known as micout in TFM/VFM vocabulary.
-Available values are:
-
-
-‘none ’
-No forced calculation.
-
-‘pcn ’
-Force p/c/n calculations.
-
-‘pcnub ’
-Force p/c/n/u/b calculations.
-
-
-
-Default value is none .
-
-
-cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
--1
(every pixel will be detected as combed) to 255
(no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is [8, 12]
.
-
-Default value is 9
.
-
-
-chroma
-Sets whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using chroma =0 is usually more reliable, except for the case
-where there is chroma only combing in the source.
-
-Default value is 0
.
-
-
-blockx
-blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-combpel pixels are required to be detected as combed for a frame to be
-declared combed. See the combpel parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is 16
.
-
-
-combpel
-The number of combed pixels inside any of the blocky by
-blockx size blocks on the frame for the frame to be detected as
-combed. While cthresh controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the blockx and blocky settings) on the
-frame. Minimum value is 0
and maximum is blocky x blockx
(at
-which point no frames will ever be detected as combed). This setting is known
-as MI in TFM/VFM vocabulary.
-
-Default value is 80
.
-
-
-
-
-
30.31.1 p/c/n/u/b meaning# TOC
-
-
-
30.31.1.1 p/c/n# TOC
-
-
We assume the following telecined stream:
-
-
-
Top fields: 1 2 2 3 4
-Bottom fields: 1 2 3 4 4
-
-
-
The numbers correspond to the progressive frame the fields relate to. Here, the
-first two frames are progressive, the 3rd and 4th are combed, and so on.
-
-
When fieldmatch
is configured to run a matching from bottom
-(field =bottom ) this is how this input stream gets transformed:
-
-
-
Input stream:
- T 1 2 2 3 4
- B 1 2 3 4 4 <-- matching reference
-
-Matches: c c n n c
-
-Output stream:
- T 1 2 3 4 4
- B 1 2 3 4 4
-
-
-
As a result of the field matching, we can see that some frames get duplicated.
-To perform a complete inverse telecine, you need to rely on a decimation filter
-after this operation. See for instance the decimate filter.
-
-
The same operation now matching from top fields (field =top )
-looks like this:
-
-
-
Input stream:
- T 1 2 2 3 4 <-- matching reference
- B 1 2 3 4 4
-
-Matches: c c p p c
-
-Output stream:
- T 1 2 2 3 4
- B 1 2 2 3 4
-
-
-
In these examples, we can see what p , c and n mean;
-basically, they refer to the frame and field of the opposite parity:
-
-
- p matches the field of the opposite parity in the previous frame
- c matches the field of the opposite parity in the current frame
- n matches the field of the opposite parity in the next frame
-
-
-
-
30.31.1.2 u/b# TOC
-
-
The u and b matching are a bit special in the sense that they match
-from the opposite parity flag. In the following examples, we assume that we are
-currently matching the 2nd frame (Top:2, bottom:2). According to the match, an
-’x’ is placed above and below each matched field.
-
-
With bottom matching (field =bottom ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 1 2 2 2
- 2 2 2 1 3
-
-
-
With top matching (field =top ):
-
-
Match: c p n b u
-
- x x x x x
- Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
- Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
- x x x x x
-
-Output frames:
- 2 2 2 1 2
- 2 1 3 2 2
-
-
-
-
30.31.2 Examples# TOC
-
-
Simple IVTC of a top field first telecined stream:
-
-
fieldmatch=order=tff:combmatch=none, decimate
-
-
-
Advanced IVTC, with fallback on yadif for still combed frames:
-
-
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
-
-
-
-
30.32 fieldorder# TOC
-
-
Transform the field order of the input video.
-
-
It accepts the following parameters:
-
-
-order
-The output field order. Valid values are tff for top field first or bff
-for bottom field first.
-
-
-
-
The default value is ‘tff ’.
-
-
The transformation is done by shifting the picture content up or down
-by one line, and filling the remaining line with appropriate picture content.
-This method is consistent with most broadcast field order converters.
-
-
If the input video is not flagged as being interlaced, or it is already
-flagged as being of the required output field order, then this filter does
-not alter the incoming video.
-
-
It is very useful when converting to or from PAL DV material,
-which is bottom field first.
-
-
For example:
-
-
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
-
-
-
-
30.33 fifo# TOC
-
-
Buffer input images and send them when they are requested.
-
-
It is mainly useful when auto-inserted by the libavfilter
-framework.
-
-
It does not take parameters.
-
-
-
30.34 format# TOC
-
-
Convert the input video to one of the specified pixel formats.
-Libavfilter will try to pick one that is suitable as input to
-the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
30.34.1 Examples# TOC
-
-
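- For instance, converting the input video to the yuv420p format:
-
-format=pix_fmts=yuv420p
-
- or to any format in a list, letting libavfilter pick a suitable one for the
-next filter:
-
-format=pix_fmts=yuv420p|yuv444p|yuv410p
-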
-
-
-
30.35 fps# TOC
-
-
Convert the video to specified constant frame rate by duplicating or dropping
-frames as necessary.
-
-
It accepts the following parameters:
-
-fps
-The desired output frame rate. The default is 25
.
-
-
-round
-Rounding method.
-
-Possible values are:
-
-zero
-zero round towards 0
-
-inf
-round away from 0
-
-down
-round towards -infinity
-
-up
-round towards +infinity
-
-near
-round to nearest
-
-
-The default is near
.
-
-
-start_time
-Assume the first PTS should be the given value, in seconds. This allows for
-padding/trimming at the start of stream. By default, no assumption is made
-about the first frame’s expected PTS, so no padding or trimming is done.
-For example, this could be set to 0 to pad the beginning with duplicates of
-the first frame if a video stream starts after the audio stream or to trim any
-frames with a negative PTS.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-fps [:round ].
-
-
See also the setpts filter.
-
-
-
30.35.1 Examples# TOC
-
-
- A typical usage in order to set the fps to 25:
-
-fps=fps=25
-
- Sets the fps to 24, using abbreviation and rounding method to round to nearest:
-
-
fps=fps=film:round=near
-
-
-
-
-
30.36 framepack# TOC
-
-
Pack two different video streams into a stereoscopic video, setting proper
-metadata on supported codecs. The two views should have the same size and
-framerate and processing will stop when the shorter video ends. Please note
-that you may conveniently adjust view properties with the scale and
-fps filters.
-
-
It accepts the following parameters:
-
-format
-The desired packing format. Supported values are:
-
-
-sbs
-The views are next to each other (default).
-
-
-tab
-The views are on top of each other.
-
-
-lines
-The views are packed by line.
-
-
-columns
-The views are packed by column.
-
-
-frameseq
-The views are temporally interleaved.
-
-
-
-
-
-
-
-
Some examples:
-
-
-
# Convert left and right views into a frame-sequential video
-ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
-
-# Convert views into a side-by-side video with the same output resolution as the input
-ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
-
-
-
-
30.37 framestep# TOC
-
-
Select one frame every N-th frame.
-
-
This filter accepts the following option:
-
-step
-Select frame after every step
frames.
-Allowed values are positive integers higher than 0. Default value is 1
.
-
-
-
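- For instance, keeping one frame out of every five:
-
-framestep=step=5
-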
-
-
30.38 frei0r# TOC
-
-
Apply a frei0r effect to the input video.
-
-
To enable the compilation of this filter, you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the frei0r effect to load. If the environment variable
-FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in FREI0R_PATH .
-Otherwise, the standard frei0r paths are searched, in this order:
-HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
-/usr/lib/frei0r-1/ .
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r effect.
-
-
-
-
-
A frei0r effect parameter can be a boolean (its value is either
-"y" or "n"), a double, a color (specified as
-R /G /B , where R , G , and B are floating point
-numbers between 0.0 and 1.0, inclusive, or by a color description as specified in the "Color"
-section in the ffmpeg-utils manual), a position (specified as X /Y , where
-X and Y are floating point numbers) and/or a string.
-
-
The number and types of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-
-
30.38.1 Examples# TOC
-
-
- Apply the distort0r effect, setting the first two double parameters:
-
-
frei0r=filter_name=distort0r:filter_params=0.5|0.01
-
-
- Apply the colordistance effect, taking a color as the first parameter:
-
-
frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-
-
- Apply the perspective effect, specifying the top left and top right image
-positions:
-
-
frei0r=perspective:0.2/0.2|0.8/0.2
-
-
-
-
For more information, see
-http://frei0r.dyne.org
-
-
-
30.39 fspp# TOC
-
-
Apply fast and simple postprocessing. It is a faster version of spp .
-
-
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
-processing filter, one of them is performed once per block, not per pixel.
-This allows for much higher speed.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 4-5. Default value is 4
.
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range 0-63.
-If not set, the filter will use the QP from the video stream (if available).
-
-
-strength
-Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
-more details but also more artifacts, while higher values make the image smoother
-but also blurrier. Default value is 0
− PSNR optimal.
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames often have a larger QP. Default is
-0
(not enabled).
-
-
-
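- For instance, a stronger smoothing pass that forces a constant quantization
-parameter can be sketched as:
-
-fspp=quality=5:qp=20
-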
-
-
-
30.40 geq# TOC
-
-
The filter accepts the following options:
-
-
-lum_expr, lum
-Set the luminance expression.
-
-cb_expr, cb
-Set the chrominance blue expression.
-
-cr_expr, cr
-Set the chrominance red expression.
-
-alpha_expr, a
-Set the alpha expression.
-
-red_expr, r
-Set the red expression.
-
-green_expr, g
-Set the green expression.
-
-blue_expr, b
-Set the blue expression.
-
-
-
-
The colorspace is selected according to the specified options. If one
-of the lum_expr , cb_expr , or cr_expr
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the red_expr , green_expr , or
-blue_expr options is specified, it will select an RGB
-colorspace.
-
-
If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified it will evaluate to the opaque value.
-If none of the chrominance expressions are specified, they will evaluate
-to the luminance expression.
-
-
The expressions can use the following variables and functions:
-
-
-N
-The sequential number of the filtered frame, starting from 0
.
-
-
-X
-Y
-The coordinates of the current sample.
-
-
-W
-H
-The width and height of the image.
-
-
-SW
-SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the corresponding luma plane number of pixels and the current
-plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
-0.5,0.5
for chroma planes.
-
-
-T
-Time of the current frame, expressed in seconds.
-
-
-p(x, y)
-Return the value of the pixel at location (x ,y ) of the current
-plane.
-
-
-lum(x, y)
-Return the value of the pixel at location (x ,y ) of the luminance
-plane.
-
-
-cb(x, y)
-Return the value of the pixel at location (x ,y ) of the
-blue-difference chroma plane. Return 0 if there is no such plane.
-
-
-cr(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red-difference chroma plane. Return 0 if there is no such plane.
-
-
-r(x, y)
-g(x, y)
-b(x, y)
-Return the value of the pixel at location (x ,y ) of the
-red/green/blue component. Return 0 if there is no such component.
-
-
-alpha(x, y)
-Return the value of the pixel at location (x ,y ) of the alpha
-plane. Return 0 if there is no such plane.
-
-
-
-
For functions, if x and y are outside the area, the value will be
-automatically clipped to the closer edge.
-
-
-
30.40.1 Examples# TOC
-
-
- Flip the image horizontally:
-
-geq=p(W-X\,Y)
-
- Generate a bidimensional sine wave, with angle PI/3
and a
-wavelength of 100 pixels:
-
-
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
-
-
- Generate a fancy enigmatic moving light:
-
-
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
-
-
- Generate a quick emboss effect:
-
-
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
-
-
- Modify RGB components depending on pixel position:
-
-
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
-
-
- Create a radial gradient that is the same size as the input (also see
-the vignette filter):
-
-
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
-
-
- Create a linear gradient to use as a mask for another filter, then
-compose with overlay . In this example the video will gradually
-become more blurry from the top to the bottom of the y-axis as defined
-by the linear gradient:
-
-
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
-
-
-
-
-
30.41 gradfun# TOC
-
-
Fix the banding artifacts that are sometimes introduced into nearly flat
-regions by truncation to 8bit color depth.
-Interpolate the gradients that should go where the bands are, and
-dither them.
-
-
It is designed for playback only. Do not use it prior to
-lossy compression, because compression tends to lose the dither and
-bring back the bands.
-
-
It accepts the following parameters:
-
-
-strength
-The maximum amount by which the filter will change any one pixel. This is also
-the threshold for detecting nearly flat regions. Acceptable values range from
-.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
-valid range.
-
-
-radius
-The neighborhood to fit the gradient to. A larger radius makes for smoother
-gradients, but also prevents the filter from modifying the pixels near detailed
-regions. Acceptable values are 8-32; the default value is 16. Out-of-range
-values will be clipped to the valid range.
-
-
-
-
-
Alternatively, the options can be specified as a flat string:
-strength [:radius ]
-
-
-
30.41.1 Examples# TOC
-
-
- Apply the filter with a 3.5 strength and a radius of 8:
-
-
gradfun=3.5:8
-
-
- Specify the radius, omitting the strength (which falls back to the default
-value):
-
-
gradfun=radius=8
-
-
-
-
-
-
30.42 haldclut# TOC
-
-
Apply a Hald CLUT to a video stream.
-
-
First input is the video stream to process, and second one is the Hald CLUT.
-The Hald CLUT input can be a simple picture or a complete video stream.
-
-
The filter accepts the following options:
-
-
-shortest
-Force termination when the shortest input terminates. Default is 0
.
-
-repeatlast
-Continue applying the last CLUT after the end of the stream. A value of
-0 disables the filter after the last frame of the CLUT is reached.
-Default is 1.
-
-
-
-
haldclut
also has the same interpolation options as lut3d (both
-filters share the same internals).
-
-
More information about the Hald CLUT can be found on Eskil Steenberg’s website
-(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
-
-
-
30.42.1 Workflow examples# TOC
-
-
-
30.42.1.1 Hald CLUT video stream# TOC
-
-
Generate an identity Hald CLUT stream altered with various effects:
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
-
-
-
Note: make sure you use a lossless codec.
-
-
Then use it with haldclut
to apply it on some random stream:
-
-
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
-
-
-
The Hald CLUT will be applied to the first 10 seconds (duration of
-clut.nut ), then the latest picture of that CLUT stream will be applied
-to the remaining frames of the mandelbrot
stream.
-
-
-
30.42.1.2 Hald CLUT with preview# TOC
-
-
A Hald CLUT is supposed to be a square image of Level*Level*Level by
-Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the
-biggest possible square starting at the top left of the picture. The remaining
-padding pixels (bottom or right) will be ignored. This area can be used to add
-a preview of the Hald CLUT.
-
-
Typically, the following generated Hald CLUT will be supported by the
-haldclut
filter:
-
-
-
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
- pad=iw+320 [padded_clut];
- smptebars=s=320x256, split [a][b];
- [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
- [main][b] overlay=W-320" -frames:v 1 clut.png
-
-
-
It contains the original and a preview of the effect of the CLUT: SMPTE color
-bars are displayed on the right-top, and below the same color bars processed by
-the color changes.
-
-
Then, the effect of this Hald CLUT can be visualized with:
-
-
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
-
-
-
-
30.43 hflip# TOC
-
-
Flip the input video horizontally.
-
-
For example, to horizontally flip the input video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "hflip" out.avi
-
-
-
-
30.44 histeq# TOC
-
This filter applies a global color histogram equalization on a
-per-frame basis.
-
-
It can be used to correct video that has a compressed range of pixel
-intensities. The filter redistributes the pixel intensities to
-equalize their distribution across the intensity range. It may be
-viewed as an "automatically adjusting contrast filter". This filter is
-useful only for correcting degraded or poorly captured source
-video.
-
-
The filter accepts the following options:
-
-
-strength
-Determine the amount of equalization to be applied. As the strength
-is reduced, the distribution of pixel intensities more-and-more
-approaches that of the input frame. The value must be a float number
-in the range [0,1] and defaults to 0.200.
-
-
-intensity
-Set the maximum intensity that can be generated and scale the output
-values appropriately. The strength should be set as desired and then
-the intensity can be limited if needed to avoid washing-out. The value
-must be a float number in the range [0,1] and defaults to 0.210.
-
-
-antibanding
-Set the antibanding level. If enabled the filter will randomly vary
-the luminance of output pixels by a small amount to avoid banding of
-the histogram. Possible values are none
, weak
or
-strong
. It defaults to none
.
-
-
-
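-
-As a hedged sketch of typical usage (input.mp4 and output.mp4 are placeholder
-file names, not taken from the original documentation), a mild equalization
-with weak antibanding could look like:
-
-
ffmpeg -i input.mp4 -vf histeq=strength=0.3:antibanding=weak output.mp4
-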
-
-
30.45 histogram# TOC
-
-
Compute and draw a color distribution histogram for the input video.
-
-
The computed histogram is a representation of the color component
-distribution in an image.
-
-
The filter accepts the following options:
-
-
-mode
-Set histogram mode.
-
-It accepts the following values:
-
-‘levels ’
-Standard histogram that displays the color components distribution in an
-image. Displays color graph for each color component. Shows distribution of
-the Y, U, V, A or R, G, B components, depending on input format, in the
-current frame. Below each graph a color component scale meter is shown.
-
-
-‘color ’
-Displays chroma values (U/V color placement) in a two dimensional
-graph (which is called a vectorscope). The brighter a pixel in the
-vectorscope, the more pixels of the input frame correspond to that pixel
-(i.e., more pixels have this chroma value). The V component is displayed on
-the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
-side being V = 255. The U component is displayed on the vertical (Y) axis,
-with the top representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value of
-a pixel of the input clip. The graph can therefore be used to read the hue
-(color flavor) and the saturation (the dominance of the hue in the color). As
-the hue of a color changes, it moves around the square. At the center of the
-square the saturation is zero, which means that the corresponding pixel has no
-color. If the amount of a specific color is increased (while leaving the other
-colors unchanged) the saturation increases, and the indicator moves towards
-the edge of the square.
-
-
-‘color2 ’
-Chroma values in vectorscope, similar to color, but the actual chroma values
-are displayed.
-
-
-‘waveform ’
-Per row/column color component graph. In row mode, the graph on the left side
-represents color component value 0 and the right side represents value = 255.
-In column mode, the top side represents color component value = 0 and bottom
-side represents value = 255.
-
-
-Default value is levels
.
-
-
-level_height
-Set height of level in levels
. Default value is 200
.
-Allowed range is [50, 2048].
-
-
-scale_height
-Set height of color scale in levels
. Default value is 12
.
-Allowed range is [0, 40].
-
-
-step
-Set step for waveform
mode. Smaller values are useful to find out how
-many values of the same luminance are distributed across input rows/columns.
-Default value is 10
. Allowed range is [1, 255].
-
-
-waveform_mode
-Set mode for waveform
. Can be either row
, or column
.
-Default is row
.
-
-
-waveform_mirror
-Set mirroring mode for waveform
. 0
means unmirrored, 1
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for row
mode and at the top for column
mode. Default is
-0
(unmirrored).
-
-
-display_mode
-Set display mode for waveform
and levels
.
-It accepts the following values:
-
-‘parade ’
-Display separate graph for the color components side by side in
-row
waveform mode or one below the other in column
waveform mode
-for waveform
histogram mode. For levels
histogram mode,
-per color component graphs are placed below each other.
-
-Using this display mode in waveform
histogram mode makes it easy to
-spot color casts in the highlights and shadows of an image, by comparing the
-contours of the top and the bottom graphs of each waveform. Since whites,
-grays, and blacks are characterized by exactly equal amounts of red, green,
-and blue, neutral areas of the picture should display three waveforms of
-roughly equal width/height. If not, the correction is easy to perform by
-making level adjustments to the three waveforms.
-
-
-‘overlay ’
-Presents information identical to that in the parade
, except
-that the graphs representing color components are superimposed directly
-over one another.
-
-This display mode in waveform
histogram mode makes it easier to spot
-relative differences or similarities in overlapping areas of the color
-components that are supposed to be identical, such as neutral whites, grays,
-or blacks.
-
-
-Default is parade
.
-
-
-levels_mode
-Set mode for levels
. Can be either linear
, or logarithmic
.
-Default is linear
.
-
-
-
-
-
30.45.1 Examples# TOC
-
-
- Calculate and draw histogram:
-
-
ffplay -i input -vf histogram
-
-
-
-
-
-
30.46 hqdn3d# TOC
-
-
This is a high precision/quality 3d denoise filter. It aims to reduce
-image noise, producing smooth images and making still images really
-still. It should enhance compressibility.
-
-
It accepts the following optional parameters:
-
-
-luma_spatial
-A non-negative floating point number which specifies spatial luma strength.
-It defaults to 4.0.
-
-
-chroma_spatial
-A non-negative floating point number which specifies spatial chroma strength.
-It defaults to 3.0*luma_spatial /4.0.
-
-
-luma_tmp
-A floating point number which specifies luma temporal strength. It defaults to
-6.0*luma_spatial /4.0.
-
-
-chroma_tmp
-A floating point number which specifies chroma temporal strength. It defaults to
-luma_tmp *chroma_spatial /luma_spatial .
-
-
-
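-
-For illustration only (file names are placeholders), the four strengths can be
-given positionally, e.g. a slightly stronger-than-default denoise:
-
-
ffmpeg -i noisy.mp4 -vf hqdn3d=4.0:3.0:6.0:4.5 denoised.mp4
-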
-
-
30.47 hqx# TOC
-
-
Apply a high-quality magnification filter designed for pixel art. This filter
-was originally created by Maxim Stepin.
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2 for hq2x, 3 for
-hq3x and 4 for hq4x.
-Default is 3.
-
-
-
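-
-A minimal sketch (pixel_art.png and upscaled.png are placeholder names),
-applying the hq2x variant by setting n to 2:
-
-
ffmpeg -i pixel_art.png -vf hqx=n=2 upscaled.png
-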
-
-
30.48 hue# TOC
-
-
Modify the hue and/or the saturation of the input.
-
-
It accepts the following parameters:
-
-
-h
-Specify the hue angle as a number of degrees. It accepts an expression,
-and defaults to "0".
-
-
-s
-Specify the saturation in the [-10,10] range. It accepts an expression and
-defaults to "1".
-
-
-H
-Specify the hue angle as a number of radians. It accepts an
-expression, and defaults to "0".
-
-
-b
-Specify the brightness in the [-10,10] range. It accepts an expression and
-defaults to "0".
-
-
-
-
h and H are mutually exclusive, and can’t be
-specified at the same time.
-
-
The b , h , H and s option values are
-expressions containing the following constants:
-
-
-n
-frame count of the input frame starting from 0
-
-
-pts
-presentation timestamp of the input frame expressed in time base units
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-
-tb
-time base of the input video
-
-
-
-
-
30.48.1 Examples# TOC
-
-
-
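-
-As a hedged example (placeholder file names), rotate the hue by 90 degrees and
-raise the saturation to 1.5:
-
-
ffmpeg -i input.mp4 -vf "hue=h=90:s=1.5" output.mp4
-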
-
-
30.48.2 Commands# TOC
-
-
This filter supports the following commands:
-
-b
-s
-h
-H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
30.49 idet# TOC
-
-
Detect video interlacing type.
-
-
This filter tries to detect whether the input frames are interlaced, progressive, or
-top or bottom field first. It will also try to detect fields that are
-repeated between adjacent frames (a sign of telecine).
-
-
Single frame detection considers only immediately adjacent frames when classifying each frame.
-Multiple frame detection incorporates the classification history of previous frames.
-
-
The filter will log these metadata values:
-
-
-single.current_frame
-Detected type of current frame using single-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-single.tff
-Cumulative number of frames detected as top field first using single-frame detection.
-
-
-multiple.tff
-Cumulative number of frames detected as top field first using multiple-frame detection.
-
-
-single.bff
-Cumulative number of frames detected as bottom field first using single-frame detection.
-
-
-multiple.current_frame
-Detected type of current frame using multiple-frame detection. One of:
-“tff” (top field first), “bff” (bottom field first),
-“progressive”, or “undetermined”
-
-
-multiple.bff
-Cumulative number of frames detected as bottom field first using multiple-frame detection.
-
-
-single.progressive
-Cumulative number of frames detected as progressive using single-frame detection.
-
-
-multiple.progressive
-Cumulative number of frames detected as progressive using multiple-frame detection.
-
-
-single.undetermined
-Cumulative number of frames that could not be classified using single-frame detection.
-
-
-multiple.undetermined
-Cumulative number of frames that could not be classified using multiple-frame detection.
-
-
-repeated.current_frame
-Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
-
-
-repeated.neither
-Cumulative number of frames with no repeated field.
-
-
-repeated.top
-Cumulative number of frames with the top field repeated from the previous frame’s top field.
-
-
-repeated.bottom
-Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
-
-
-
-
The filter accepts the following options:
-
-
-intl_thres
-Set interlacing threshold.
-
-prog_thres
-Set progressive threshold.
-
-repeat_thres
-Threshold for repeated field detection.
-
-half_life
-Number of frames after which a given frame’s contribution to the
-statistics is halved (i.e., it contributes only 0.5 to its
-classification). The default of 0 means that all frames seen are given
-full weight of 1.0 forever.
-
-analyze_interlaced_flag
-When this is not 0, idet will use the specified number of frames to determine
-whether the interlaced flag is accurate; it will not count undetermined frames.
-If the flag is found to be accurate it will be used without any further
-computations; if it is found to be inaccurate it will be cleared without any
-further computations. This allows inserting the idet filter as a low-cost
-method to clean up the interlaced flag.
-
-
-
-
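-
-A typical way to inspect a clip, sketched here with placeholder names, is to
-run idet over a limited number of frames, discard the output, and read the
-cumulative counters from the log:
-
-
ffmpeg -i input.mkv -vf idet -frames:v 500 -an -f null -
-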
-
30.50 il# TOC
-
-
Deinterleave or interleave fields.
-
-
This filter allows one to process interlaced image fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-
The filter accepts the following options:
-
-
-luma_mode, l
-chroma_mode, c
-alpha_mode, a
-Available values for luma_mode , chroma_mode and
-alpha_mode are:
-
-
-‘none ’
-Do nothing.
-
-
-‘deinterleave, d ’
-Deinterleave fields, placing one above the other.
-
-
-‘interleave, i ’
-Interleave fields. Reverse the effect of deinterleaving.
-
-
-Default value is none
.
-
-
-luma_swap, ls
-chroma_swap, cs
-alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
-
-
-
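-
-As an illustrative sketch, fields can be deinterleaved, filtered as half
-pictures, and re-interleaved in a single chain (boxblur is only an example of
-an intermediate filter):
-
-
il=l=d:c=d,boxblur=1:1,il=l=i:c=i
-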
-
-
30.51 interlace# TOC
-
-
Simple interlacing filter from progressive content. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-
-
Original Original New Frame
- Frame 'j' Frame 'j+1' (tff)
- ========== =========== ==================
- Line 0 --------------------> Frame 'j' Line 0
- Line 1 Line 1 ----> Frame 'j+1' Line 1
- Line 2 ---------------------> Frame 'j' Line 2
- Line 3 Line 3 ----> Frame 'j+1' Line 3
- ... ... ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-
-
-
It accepts the following optional parameters:
-
-
-scan
-This determines whether the interlaced frame is taken from the even
-(tff - default) or odd (bff) lines of the progressive frame.
-
-
-lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-
-
-
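-
-A hedged usage sketch with placeholder file names, producing top-field-first
-interlaced output from progressive input:
-
-
ffmpeg -i progressive.mp4 -vf interlace=scan=tff interlaced.mp4
-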
-
-
30.52 kerndeint# TOC
-
-
Deinterlace input video by applying Donald Graft’s adaptive kernel
-deinterlacing. It works on interlaced parts of a video to produce
-progressive frames.
-
-
The description of the accepted parameters follows.
-
-
-thresh
-Set the threshold which affects the filter’s tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-
-map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-
-order
-Set the fields order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-
-sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-
-twoway
-Enable twoway sharpening if set to 1. Default is 0.
-
-
-
-
-
30.52.1 Examples# TOC
-
-
- Apply default values:
-
-
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-
-
- Enable additional sharpening:
-
-
kerndeint=sharp=1
-
-
- Paint processed pixels in white:
-
-
kerndeint=map=1
-
-
-
-
30.53 lenscorrection# TOC
-
-
Correct radial lens distortion
-
-
This filter can be used to correct for radial distortion as can result from the use
-of wide angle lenses, and thereby re-rectify the image. To find the right parameters
-one can use tools available for example as part of opencv or simply trial-and-error.
-To use opencv use the calibration sample (under samples/cpp) from the opencv sources
-and extract the k1 and k2 coefficients from the resulting matrix.
-
-
Note that effectively the same filter is available in the open-source tools Krita and
-Digikam from the KDE project.
-
-
In contrast to the vignette filter, which can also be used to compensate lens errors,
-this filter corrects the distortion of the image, whereas vignette corrects the
-brightness distribution, so you may want to use both filters together in certain
-cases, though you will have to take care of ordering, i.e. whether vignetting should
-be applied before or after lens correction.
-
-
-
30.53.1 Options# TOC
-
-
The filter accepts the following options:
-
-
-cx
-Relative x-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-width.
-
-cy
-Relative y-coordinate of the focal point of the image, and thereby the center of the
-distortion. This value has a range [0,1] and is expressed as fractions of the image
-height.
-
-k1
-Coefficient of the quadratic correction term. 0.5 means no correction.
-
-k2
-Coefficient of the double quadratic correction term. 0.5 means no correction.
-
-
-
-
The formula that generates the correction is:
-
-
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
-
-
where r_0 is half of the image diagonal and r_src and r_tgt are the
-distances from the focal point in the source and target images, respectively.
-
-
-
30.54 lut3d# TOC
-
-
Apply a 3D LUT to an input video.
-
-
The filter accepts the following options:
-
-
-file
-Set the 3D LUT file name.
-
-Currently supported formats:
-
-‘3dl ’
-AfterEffects
-
-‘cube ’
-Iridas
-
-‘dat ’
-DaVinci
-
-‘m3d ’
-Pandora
-
-
-
-interp
-Select interpolation mode.
-
-Available values are:
-
-
-‘nearest ’
-Use values from the nearest defined point.
-
-‘trilinear ’
-Interpolate values using the 8 points defining a cube.
-
-‘tetrahedral ’
-Interpolate values using a tetrahedron.
-
-
-
-
-
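-
-For example (look.cube is a placeholder LUT file in the Iridas format listed
-above), applying a grading LUT to a clip could look like:
-
-
ffmpeg -i input.mp4 -vf lut3d=file=look.cube output.mp4
-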
-
-
30.55 lut, lutrgb, lutyuv# TOC
-
-
Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-
lutyuv applies a lookup table to a YUV input video, lutrgb
-to an RGB input video.
-
-
These filters accept the following parameters:
-
-c0
-set first pixel component expression
-
-c1
-set second pixel component expression
-
-c2
-set third pixel component expression
-
-c3
-set fourth pixel component expression, corresponds to the alpha component
-
-
-r
-set red component expression
-
-g
-set green component expression
-
-b
-set blue component expression
-
-a
-alpha component expression
-
-
-y
-set Y/luminance component expression
-
-u
-set U/Cb component expression
-
-v
-set V/Cr component expression
-
-
-
-
Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
-
-
The exact component associated to each of the c* options depends on the
-format in input.
-
-
The lut filter requires either YUV or RGB pixel formats in input,
-lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
-
-
The expressions can contain the following constants and functions:
-
-
-w
-h
-The input width and height.
-
-
-val
-The input value for the pixel component.
-
-
-clipval
-The input value, clipped to the minval -maxval range.
-
-
-maxval
-The maximum value for the pixel component.
-
-
-minval
-The minimum value for the pixel component.
-
-
-negval
-The negated value for the pixel component value, clipped to the
-minval -maxval range; it corresponds to the expression
-"maxval-clipval+minval".
-
-
-clip(val)
-The computed value in val , clipped to the
-minval -maxval range.
-
-
-gammaval(gamma)
-The computed gamma correction value of the pixel component value,
-clipped to the minval -maxval range. It corresponds to the
-expression
-"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
-
-
-
-
-
All expressions default to "val".
-
-
-
30.55.1 Examples# TOC
-
-
-
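-
-As a hedged sketch using the constants defined above (placeholder file names),
-apply a gamma of 0.5 to the luminance plane only:
-
-
ffmpeg -i input.mp4 -vf "lutyuv=y=gammaval(0.5)" output.mp4
-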
-
-
30.56 mergeplanes# TOC
-
-
Merge color channel components from several video streams.
-
-
The filter accepts up to 4 input streams, and merges the selected input
-planes into the output video.
-
-
This filter accepts the following options:
-
-mapping
-Set input to output plane mapping. Default is 0
.
-
-The mapping is specified as a bitmap. It should be specified as a
-hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
-mapping for the first plane of the output stream. ’A’ sets the number of
-the input stream to use (from 0 to 3), and ’a’ the plane number of the
-corresponding input to use (from 0 to 3). The rest of the mappings are
-similar, ’Bb’ describes the mapping for the output stream second
-plane, ’Cc’ describes the mapping for the output stream third plane and
-’Dd’ describes the mapping for the output stream fourth plane.
-
-
-format
-Set output pixel format. Default is yuva444p
.
-
-
-
-
-
30.56.1 Examples# TOC
-
-
- Merge three gray video streams of same width and height into single video stream:
-
-
[a0][a1][a2]mergeplanes=0x001020:yuv444p
-
-
- Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
-
-
[a0][a1]mergeplanes=0x00010210:yuva444p
-
-
- Swap Y and A plane in yuva444p stream:
-
-
format=yuva444p,mergeplanes=0x03010200:yuva444p
-
-
- Swap U and V plane in yuv420p stream:
-
-
format=yuv420p,mergeplanes=0x000201:yuv420p
-
-
- Cast a rgb24 clip to yuv444p:
-
-
format=rgb24,mergeplanes=0x000102:yuv444p
-
-
-
-
-
30.57 mcdeint# TOC
-
-
Apply motion-compensation deinterlacing.
-
-
It needs one field per frame as input and must thus be used together
-with yadif=1/3 or equivalent.
-
-
This filter accepts the following options:
-
-mode
-Set the deinterlacing mode.
-
-It accepts one of the following values:
-
-‘fast ’
-‘medium ’
-‘slow ’
-use iterative motion estimation
-
-‘extra_slow ’
-like ‘slow ’, but use multiple reference frames.
-
-
-Default value is ‘fast ’.
-
-
-parity
-Set the picture field parity assumed for the input video. It must be
-one of the following values:
-
-
-‘0, tff ’
-assume top field first
-
-‘1, bff ’
-assume bottom field first
-
-
-
-Default value is ‘bff ’.
-
-
-qp
-Set per-block quantization parameter (QP) used by the internal
-encoder.
-
-Higher values should result in a smoother motion vector field but less
-optimal individual vectors. Default value is 1.
-
-
-
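-
-A sketch of the pairing described above (placeholder file names, arbitrary
-option values): yadif=1 produces one frame per field, which mcdeint then
-refines:
-
-
ffmpeg -i interlaced.mp4 -vf "yadif=1,mcdeint=mode=medium:parity=tff:qp=2" output.mp4
-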
-
-
30.58 mp# TOC
-
-
Apply an MPlayer filter to the input video.
-
-
This filter provides a wrapper around some of the filters of
-MPlayer/MEncoder.
-
-
This wrapper is considered experimental. Some of the wrapped filters
-may not work properly and we may drop support for them, as they will
-be implemented natively into FFmpeg. Thus you should avoid
-depending on them when writing portable scripts.
-
-
The filter accepts the parameters:
-filter_name [:=]filter_params
-
-
filter_name is the name of a supported MPlayer filter,
-filter_params is a string containing the parameters accepted by
-the named filter.
-
-
The list of the currently supported filters follows:
-
-eq2
-eq
-ilpack
-softpulldown
-
-
-
The parameter syntax and behavior for the listed filters are the same
-of the corresponding MPlayer filters. For detailed instructions check
-the "VIDEO FILTERS" section in the MPlayer manual.
-
-
-
30.58.1 Examples# TOC
-
-
- Adjust gamma, brightness, contrast:
-
-
-
-
See also mplayer(1), http://www.mplayerhq.hu/ .
-
-
-
30.59 mpdecimate# TOC
-
-
Drop frames that do not differ greatly from the previous frame in
-order to reduce frame rate.
-
-
The main use of this filter is for very-low-bitrate encoding
-(e.g. streaming over dialup modem), but it could in theory be used for
-fixing movies that were inverse-telecined incorrectly.
-
-
A description of the accepted options follows.
-
-
-max
-Set the maximum number of consecutive frames which can be dropped (if
-positive), or the minimum interval between dropped frames (if
-negative). If the value is 0, the frame is dropped regardless of the
-number of previous sequentially dropped frames.
-
-Default value is 0.
-
-
-hi
-lo
-frac
-Set the dropping threshold values.
-
-Values for hi and lo are for 8x8 pixel blocks and
-represent actual pixel value differences, so a threshold of 64
-corresponds to 1 unit of difference for each pixel, or the same spread
-out differently over the block.
-
-A frame is a candidate for dropping if no 8x8 blocks differ by more
-than a threshold of hi , and if no more than frac blocks (1
-meaning the whole image) differ by more than a threshold of lo .
-
-Default value for hi is 64*12, default value for lo is
-64*5, and default value for frac is 0.33.
-
-
-
-
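-
-A hedged example with placeholder file names; the variable-frame-rate muxing
-flag is added so that dropped frames actually shorten the output timeline:
-
-
ffmpeg -i input.mp4 -vf mpdecimate -vsync vfr output.mp4
-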
-
-
30.60 negate# TOC
-
-
Negate input video.
-
-
It accepts an integer as input; if non-zero, the
-alpha component (if available) is also negated. The default value is 0.
-
-
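-
-For instance (placeholder file names), invert all color components of a clip:
-
-
ffmpeg -i input.mp4 -vf negate output.mp4
-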
-
30.61 noformat# TOC
-
-
Force libavfilter not to use any of the specified pixel formats for the
-input to the next filter.
-
-
It accepts the following parameters:
-
-pix_fmts
-A ’|’-separated list of pixel format names, such as
-"pix_fmts=yuv420p|monow|rgb24".
-
-
-
-
-
-
30.61.1 Examples# TOC
-
-
- Force libavfilter to use a format different from yuv420p for the
-input to the vflip filter:
-
-
noformat=pix_fmts=yuv420p,vflip
-
-
- Convert the input video to any of the formats not contained in the list:
-
-
noformat=yuv420p|yuv444p|yuv410p
-
-
-
-
-
30.62 noise# TOC
-
-
Add noise on video input frame.
-
-
The filter accepts the following options:
-
-
-all_seed
-c0_seed
-c1_seed
-c2_seed
-c3_seed
-Set noise seed for specific pixel component or all pixel components in case
-of all_seed . Default value is 123457
.
-
-
-all_strength, alls
-c0_strength, c0s
-c1_strength, c1s
-c2_strength, c2s
-c3_strength, c3s
-Set noise strength for specific pixel component or all pixel components in case
-all_strength . Default value is 0
. Allowed range is [0, 100].
-
-
-all_flags, allf
-c0_flags, c0f
-c1_flags, c1f
-c2_flags, c2f
-c3_flags, c3f
-Set pixel component flags or set flags for all components if all_flags .
-Available values for component flags are:
-
-‘a ’
-averaged temporal noise (smoother)
-
-‘p ’
-mix random noise with a (semi)regular pattern
-
-‘t ’
-temporal noise (noise pattern changes between frames)
-
-‘u ’
-uniform noise (gaussian otherwise)
-
-
-
-
-
-
-
30.62.1 Examples# TOC
-
-
Add temporal and uniform noise to input video:
-
-
noise=alls=20:allf=t+u
-
-
-
-
30.63 null# TOC
-
-
Pass the video source unchanged to the output.
-
-
-
30.64 ocv# TOC
-
-
Apply a video transform using libopencv.
-
-
To enable this filter, install the libopencv library and headers and
-configure FFmpeg with --enable-libopencv
.
-
-
It accepts the following parameters:
-
-
-filter_name
-The name of the libopencv filter to apply.
-
-
-filter_params
-The parameters to pass to the libopencv filter. If not specified, the default
-values are assumed.
-
-
-
-
-
Refer to the official libopencv documentation for more precise
-information:
-http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
-
-
Several libopencv filters are supported; see the following subsections.
-
-
-
30.64.1 dilate# TOC
-
-
Dilate an image by using a specific structuring element.
-It corresponds to the libopencv function cvDilate
.
-
-
It accepts the parameters: struct_el |nb_iterations .
-
-
struct_el represents a structuring element, and has the syntax:
-cols xrows +anchor_x xanchor_y /shape
-
-
cols and rows represent the number of columns and rows of
-the structuring element, anchor_x and anchor_y the anchor
-point, and shape the shape for the structuring element. shape
-must be "rect", "cross", "ellipse", or "custom".
-
-
If the value for shape is "custom", it must be followed by a
-string of the form "=filename ". The file with name
-filename is assumed to represent a binary image, with each
-printable character corresponding to a bright pixel. When a custom
-shape is used, cols and rows are ignored, the number
-of columns and rows of the read file are assumed instead.
-
-
The default value for struct_el is "3x3+0x0/rect".
-
-
nb_iterations specifies the number of times the transform is
-applied to the image, and defaults to 1.
-
-
Some examples:
-
-
# Use the default values
-ocv=dilate
-
-# Dilate using a structuring element with a 5x5 cross, iterating two times
-ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
-
-# Read the shape from the file diamond.shape, iterating two times.
-# The file diamond.shape may contain a pattern of characters like this
-# *
-# ***
-# *****
-# ***
-# *
-# The specified columns and rows are ignored
-# but the anchor point coordinates are not
-ocv=dilate:0x0+2x2/custom=diamond.shape|2
-
-
-
-
30.64.2 erode# TOC
-
-
Erode an image by using a specific structuring element.
-It corresponds to the libopencv function cvErode
.
-
-
It accepts the parameters: struct_el :nb_iterations ,
-with the same syntax and semantics as the dilate filter.
-
-
-
30.64.3 smooth# TOC
-
-
Smooth the input video.
-
-
The filter takes the following parameters:
-type |param1 |param2 |param3 |param4 .
-
-
type is the type of smooth filter to apply, and must be one of
-the following values: "blur", "blur_no_scale", "median", "gaussian",
-or "bilateral". The default value is "gaussian".
-
-
The meaning of param1 , param2 , param3 , and param4
-depend on the smooth type. param1 and
-param2 accept integer positive values or 0. param3 and
-param4 accept floating point values.
-
-
The default value for param1 is 3. The default value for the
-other parameters is 0.
-
-
These parameters correspond to the parameters assigned to the
-libopencv function cvSmooth
.
-
-
-
30.65 overlay# TOC
-
-
Overlay one video on top of another.
-
-
It takes two inputs and has one output. The first input is the "main"
-video on which the second input is overlaid.
-
-
It accepts the following parameters:
-
-
A description of the accepted options follows.
-
-
-x
-y
-Set the expression for the x and y coordinates of the overlaid video
-on the main video. Default value is "0" for both expressions. In case
-the expression is invalid, it is set to a huge value (meaning that the
-overlay will not be displayed within the output visible area).
-
-
-eof_action
-The action to take when EOF is encountered on the secondary input; it accepts
-one of the following values:
-
-
-repeat
-Repeat the last frame (the default).
-
-endall
-End both streams.
-
-pass
-Pass the main input through.
-
-
-
-
-eval
-Set when the expressions for x , and y are evaluated.
-
-It accepts the following values:
-
-‘init ’
-only evaluate expressions once during the filter initialization or
-when a command is processed
-
-
-‘frame ’
-evaluate expressions for each incoming frame
-
-
-
-Default value is ‘frame ’.
-
-
-shortest
-If set to 1, force the output to terminate when the shortest input
-terminates. Default value is 0.
-
-
-format
-Set the format for the output video.
-
-It accepts the following values:
-
-‘yuv420 ’
-force YUV420 output
-
-
-‘yuv422 ’
-force YUV422 output
-
-
-‘yuv444 ’
-force YUV444 output
-
-
-‘rgb ’
-force RGB output
-
-
-
-Default value is ‘yuv420 ’.
-
-
-rgb (deprecated)
-If set to 1, force the filter to accept inputs in the RGB
-color space. Default value is 0. This option is deprecated, use
-format instead.
-
-
-repeatlast
-If set to 1, force the filter to draw the last overlay frame over the
-main input until the end of the stream. A value of 0 disables this
-behavior. Default value is 1.
-
-
-
-
The x , and y expressions can contain the following
-parameters.
-
-
-main_w, W
-main_h, H
-The main input width and height.
-
-
-overlay_w, w
-overlay_h, h
-The overlay input width and height.
-
-
-x
-y
-The computed values for x and y . They are evaluated for
-each new frame.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values of the output
-format. For example for the pixel format "yuv422p" hsub is 2 and
-vsub is 1.
-
-
-n
-the number of input frame, starting from 0
-
-
-pos
-the position in the file of the input frame, NAN if unknown
-
-
-t
-The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
-
-
-
-
-
Note that the n , pos , t variables are available only
-when evaluation is done per frame , and will evaluate to NAN
-when eval is set to ‘init ’.
-
-
Be aware that frames are taken from each input video in timestamp
-order, hence, if their initial timestamps differ, it is a good idea
-to pass the two inputs through a setpts=PTS-STARTPTS filter to
-have them begin in the same zero timestamp, as the example for
-the movie filter does.
-
-
You can chain together more overlays but you should test the
-efficiency of such approach.
-
-
-
30.65.1 Commands# TOC
-
-
This filter supports the following commands:
-
-x
-y
-Modify the x and y of the overlay input.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
30.65.2 Examples# TOC
-
-
-
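-
-As a hedged sketch (input.mp4 and logo.png are placeholder names), place the
-second input 10 pixels away from the bottom-right corner of the main video,
-using the constants listed above:
-
-
ffmpeg -i input.mp4 -i logo.png -filter_complex "overlay=W-w-10:H-h-10" output.mp4
-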
-
-
30.66 owdenoise# TOC
-
-
Apply Overcomplete Wavelet denoiser.
-
-
The filter accepts the following options:
-
-
-depth
-Set depth.
-
-Larger depth values will denoise lower frequency components more, but
-slow down filtering.
-
-Must be an int in the range 8-16, default is 8
.
-
-
-luma_strength, ls
-Set luma strength.
-
-Must be a double value in the range 0-1000, default is 1.0
.
-
-
-chroma_strength, cs
-Set chroma strength.
-
-Must be a double value in the range 0-1000, default is 1.0
.
-
-
-
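-
-A minimal sketch with arbitrary illustration values (file names are
-placeholders):
-
-
ffmpeg -i noisy.mp4 -vf owdenoise=depth=9:ls=3:cs=3 output.mp4
-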
-
-
30.67 pad# TOC
-
-
Add paddings to the input image, and place the original input at the
-provided x , y coordinates.
-
-
It accepts the following parameters:
-
-
-width, w
-height, h
-Specify an expression for the size of the output image with the
-paddings added. If the value for width or height is 0, the
-corresponding input size is used for the output.
-
-The width expression can reference the value set by the
-height expression, and vice versa.
-
-The default value of width and height is 0.
-
-
-x
-y
-Specify the offsets to place the input image at within the padded area,
-with respect to the top/left border of the output image.
-
-The x expression can reference the value set by the y
-expression, and vice versa.
-
-The default value of x and y is 0.
-
-
-color
-Specify the color of the padded area. For the syntax of this option,
-check the "Color" section in the ffmpeg-utils manual.
-
-The default value of color is "black".
-
-
-
-
The value for the width , height , x , and y
-options are expressions containing the following constants:
-
-
-in_w
-in_h
-The input video width and height.
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output width and height (the size of the padded area), as
-specified by the width and height expressions.
-
-
-ow
-oh
-These are the same as out_w and out_h .
-
-
-x
-y
-The x and y offsets as specified by the x and y
-expressions, or NAN if not yet specified.
-
-
-a
-same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-input display aspect ratio, it is the same as (iw / ih ) * sar
-
-
-hsub
-vsub
-The horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
30.67.1 Examples# TOC
-
-
-
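-
-As an illustrative sketch (placeholder file names), add a 20-pixel black
-border on every side, using the constants defined above for the offsets:
-
-
ffmpeg -i input.mp4 -vf "pad=iw+40:ih+40:20:20:black" output.mp4
-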
-
-
30.68 perspective# TOC
-
-
Correct perspective of video not recorded perpendicular to the screen.
-
-
A description of the accepted parameters follows.
-
-
-x0
-y0
-x1
-y1
-x2
-y2
-x3
-y3
-Set coordinates expression for top left, top right, bottom left and bottom right corners.
-Default values are 0:0:W:0:0:H:W:H
with which perspective will remain unchanged.
-If the sense
option is set to source
, then the specified points will be sent
-to the corners of the destination. If the sense
option is set to destination
,
-then the corners of the source will be sent to the specified coordinates.
-
-The expressions can use the following variables:
-
-
-W
-H
-the width and height of video frame.
-
-
-
-
-interpolation
-Set interpolation for perspective correction.
-
-It accepts the following values:
-
-‘linear ’
-‘cubic ’
-
-
-Default value is ‘linear ’.
-
-
-sense
-Set interpretation of coordinate options.
-
-It accepts the following values:
-
-‘0, source ’
-
-Send point in the source specified by the given coordinates to
-the corners of the destination.
-
-
-‘1, destination ’
-
-Send the corners of the source to the point in the destination specified
-by the given coordinates.
-
-Default value is ‘source ’.
-
-
-
-
-
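-
-An illustrative sketch with arbitrary coordinates: with the default
-sense=source, the two top points are moved 30 pixels inwards, so the filter
-stretches the upper part of the frame out to the output corners:
-
-
perspective=x0=30:y0=0:x1=W-30:y1=0:x2=0:y2=H:x3=W:y3=H
-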
-
-
30.69 phase# TOC
-
-
Delay interlaced video by one field time so that the field order changes.
-
-
The intended use is to fix PAL movies that have been captured with the
-opposite field order to the film-to-video transfer.
-
-
A description of the accepted parameters follows.
-
-
-mode
-Set phase mode.
-
-It accepts the following values:
-
-‘t ’
-Capture field order top-first, transfer bottom-first.
-Filter will delay the bottom field.
-
-
-‘b ’
-Capture field order bottom-first, transfer top-first.
-Filter will delay the top field.
-
-
-‘p ’
-Capture and transfer with the same field order. This mode only exists
-for the documentation of the other options to refer to, but if you
-actually select it, the filter will faithfully do nothing.
-
-
-‘a ’
-Capture field order determined automatically by field flags, transfer
-opposite.
-Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
-basis using field flags. If no field information is available,
-then this works just like ‘u ’.
-
-
-‘u ’
-Capture unknown or varying, transfer opposite.
-Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
-analyzing the images and selecting the alternative that produces best
-match between the fields.
-
-
-‘T ’
-Capture top-first, transfer unknown or varying.
-Filter selects among ‘t ’ and ‘p ’ using image analysis.
-
-
-‘B ’
-Capture bottom-first, transfer unknown or varying.
-Filter selects among ‘b ’ and ‘p ’ using image analysis.
-
-
-‘A ’
-Capture determined by field flags, transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
-image analysis. If no field information is available, then this works just
-like ‘U ’. This is the default mode.
-
-
-‘U ’
-Both capture and transfer unknown or varying.
-Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
-
-
-
-
-
-
-
30.70 pixdesctest# TOC
-
-
Pixel format descriptor test filter, mainly useful for internal
-testing. The output video should be equal to the input video.
-
-
For example:
-
-
format=monow, pixdesctest
-
-
-
can be used to test the monowhite pixel format descriptor definition.
-
-
-
30.71 pp# TOC
-
-
Enable the specified chain of postprocessing subfilters using libpostproc. This
-library should be automatically selected with a GPL build (--enable-gpl
).
-Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
-Each subfilter and some options have a short and a long name that can be used
-interchangeably, i.e. dr/dering are the same.
-
-
The filters accept the following options:
-
-
-subfilters
-Set postprocessing subfilters string.
-
-
-
-
All subfilters share common options to determine their scope:
-
-
-a/autoq
-Honor the quality commands for this subfilter.
-
-
-c/chrom
-Do chrominance filtering, too (default).
-
-
-y/nochrom
-Do luminance filtering only (no chrominance).
-
-
-n/noluma
-Do chrominance filtering only (no luminance).
-
-
-
-
These options can be appended after the subfilter name, separated by a ’|’.
-
-
Available subfilters are:
-
-
-hb/hdeblock[|difference[|flatness]]
-Horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-vb/vdeblock[|difference[|flatness]]
-Vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-ha/hadeblock[|difference[|flatness]]
-Accurate horizontal deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-va/vadeblock[|difference[|flatness]]
-Accurate vertical deblocking filter
-
-difference
-Difference factor where higher values mean more deblocking (default: 32
).
-
-flatness
-Flatness threshold where lower values mean more deblocking (default: 39
).
-
-
-
-
-
-
The horizontal and vertical deblocking filters share the difference and
-flatness values so you cannot set different horizontal and vertical
-thresholds.
-
-
-h1/x1hdeblock
-Experimental horizontal deblocking filter
-
-
-v1/x1vdeblock
-Experimental vertical deblocking filter
-
-
-dr/dering
-Deringing filter
-
-
-tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
-
-threshold1
-larger -> stronger filtering
-
-threshold2
-larger -> stronger filtering
-
-threshold3
-larger -> stronger filtering
-
-
-
-
-al/autolevels[:f/fullyrange], automatic brightness / contrast correction
-
-f/fullyrange
-Stretch luminance to 0-255
.
-
-
-
-
-lb/linblenddeint
-Linear blend deinterlacing filter that deinterlaces the given block by
-filtering all lines with a (1 2 1)
filter.
-
-
-li/linipoldeint
-Linear interpolating deinterlacing filter that deinterlaces the given block by
-linearly interpolating every second line.
-
-
-ci/cubicipoldeint
-Cubic interpolating deinterlacing filter deinterlaces the given block by
-cubically interpolating every second line.
-
-
-md/mediandeint
-Median deinterlacing filter that deinterlaces the given block by applying a
-median filter to every second line.
-
-
-fd/ffmpegdeint
-FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
-second line with a (-1 4 2 4 -1)
filter.
-
-
-l5/lowpass5
-Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
-block by filtering all lines with a (-1 2 6 2 -1)
filter.
-
-
-fq/forceQuant[|quantizer]
-Overrides the quantizer table from the input with the constant quantizer you
-specify.
-
-quantizer
-Quantizer to use
-
-
-
-
-de/default
-Default pp filter combination (hb|a,vb|a,dr|a
)
-
-
-fa/fast
-Fast pp filter combination (h1|a,v1|a,dr|a
)
-
-
-ac
-High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
-
-
-
-
-
30.71.1 Examples# TOC
-
-
- Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-
-
pp=hb/vb/dr/al
-
-
- Apply default filters without brightness/contrast correction:
-
-
pp=de/-al
-
-
- Apply default filters and temporal denoiser:
-
-
pp=default/tmpnoise|1|2|3
-
-
- Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-
-
pp=hb|y/vb|a
-
-
-
-
30.72 pp7# TOC
-
Apply Postprocessing filter 7. It is a variant of the spp filter,
-similar to spp = 6 with 7 point DCT, where only the center sample is
-used after IDCT.
-
-
The filter accepts the following options:
-
-
-qp
-Force a constant quantization parameter. It accepts an integer in range
-0 to 63. If not set, the filter will use the QP from the video stream
-(if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding.
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-‘medium ’
-Set medium thresholding (good results, default).
-
-
-
-
-
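-
-A hedged usage sketch (placeholder file names, arbitrary QP value):
-
-
ffmpeg -i input.mp4 -vf pp7=qp=8:mode=soft output.mp4
-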
-
-
30.73 psnr# TOC
-
-
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-
This filter takes in input two input videos, the first input is
-considered the "main" source and is passed unchanged to the
-output. The second input is used as a "reference" video for computing
-the PSNR.
-
-
Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. Also it assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-
The obtained average PSNR is printed through the logging system.
-
-
The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-
-
PSNR = 10*log10(MAX^2/MSE)
-
-
-
Where MAX is the average of the maximum values of each component of the
-image.
-
-
The description of the accepted parameters follows.
-
-
-stats_file, f
-If specified the filter will use the named file to save the PSNR of
-each individual frame.
-
-
-
-
The file printed if stats_file is selected, contains a sequence of
-key/value pairs of the form key :value for each compared
-couple of frames.
-
-
A description of each shown parameter follows:
-
-
-n
-sequential number of the input frame, starting from 1
-
-
-mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-
-mse_y, mse_u, mse_v, mse_r, mse_g, mse_g, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-
-psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-
-
-
-
For example:
-
-
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-
-
-
In this example the input file being processed is compared with the
-reference file ref_movie.mpg . The PSNR of each individual frame
-is stored in stats.log .
-
-
-
30.74 pullup# TOC
-
-
Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-
The pullup filter is designed to take advantage of future context in making
-its decisions. This filter is stateless in the sense that it does not lock
-onto a pattern to follow, but it instead looks forward to the following
-fields in order to identify matches and rebuild progressive frames.
-
-
To produce content with an even framerate, insert the fps filter after
-pullup, use fps=24000/1001 if the input frame rate is 29.97fps,
-fps=24 for 30fps and the (rare) telecined 25fps input.
-
-
The filter accepts the following options:
-
-
-jl
-jr
-jt
-jb
-These options set the amount of "junk" to ignore at the left, right, top, and
-bottom of the image, respectively. Left and right are in units of 8 pixels,
-while top and bottom are in units of 2 lines.
-The default is 8 pixels on each side.
-
-
-sb
-Set the strict breaks. Setting this option to 1 will reduce the chances of
-filter generating an occasional mismatched frame, but it may also cause an
-excessive number of frames to be dropped during high motion sequences.
-Conversely, setting it to -1 will make filter match fields more easily.
-This may help processing of video where there is slight blurring between
-the fields, but may also cause there to be interlaced frames in the output.
-Default value is 0
.
-
-
-mp
-Set the metric plane to use. It accepts the following values:
-
-‘l ’
-Use luma plane.
-
-
-‘u ’
-Use chroma blue plane.
-
-
-‘v ’
-Use chroma red plane.
-
-
-
-This option may be set to use chroma plane instead of the default luma plane
-for doing filter’s computations. This may improve accuracy on very clean
-source material, but more likely will decrease accuracy, especially if there
-is chroma noise (rainbow effect) or any grayscale video.
-The main purpose of setting mp to a chroma plane is to reduce CPU
-load and make pullup usable in realtime on slow machines.
-
-
-
-
For best results (without duplicated frames in the output file) it is
-necessary to change the output frame rate. For example, to inverse
-telecine NTSC input:
-
-
ffmpeg -i input -vf pullup -r 24000/1001 ...
-
-
-
-
30.75 qp# TOC
-
-
Change video quantization parameters (QP).
-
-
The filter accepts the following option:
-
-
-qp
-Set expression for quantization parameter.
-
-
-
-
The expression is evaluated through the eval API and can contain, among others,
-the following constants:
-
-
-known
-1 if index is not 129, 0 otherwise.
-
-
-qp
-Sequential index starting from -129 to 128.
-
-
-
-
-
30.75.1 Examples# TOC
-
-
- Some equation like:
-
-
qp=2+2*sin(PI*qp)
-
-
-
-
30.76 removelogo# TOC
-
-
Suppress a TV station logo, using an image file to determine which
-pixels comprise the logo. It works by filling in the pixels that
-comprise the logo with neighboring pixels.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filter bitmap file, which can be any image format supported by
-libavformat. The width and height of the image file must match those of the
-video stream being processed.
-
-
-
-
Pixels in the provided bitmap image with a value of zero are not
-considered part of the logo, non-zero pixels are considered part of
-the logo. If you use white (255) for the logo and black (0) for the
-rest, you will be safe. For making the filter bitmap, it is
-recommended to take a screen capture of a black frame with the logo
-visible, and then using a threshold filter followed by the erode
-filter once or twice.
-
-
If needed, little splotches can be fixed manually. Remember that if
-logo pixels are not covered, the filter quality will be much
-reduced. Marking too many pixels as part of the logo does not hurt as
-much, but it will increase the amount of blurring needed to cover over
-the image and will destroy more information than necessary, and extra
-pixels will slow things down on a large logo.
-
-
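-
-A minimal sketch (logo_mask.png is a placeholder bitmap prepared as described
-above):
-
-
ffmpeg -i input.mp4 -vf removelogo=f=logo_mask.png output.mp4
-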
-
30.77 rotate# TOC
-
-
Rotate video by an arbitrary angle expressed in radians.
-
-
The filter accepts the following options:
-
-
A description of the optional parameters follows.
-
-angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-
-out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-
-out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-
-bilinear
-Enable bilinear interpolation if set to 1, a value of 0 disables
-it. Default value is 1.
-
-
-fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-
-
-
-
The expressions for the angle and the output size can contain the
-following constants and functions:
-
-
-n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-
-t
-time in seconds of the input frame, it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-
-hsub
-vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-in_w, iw
-in_h, ih
-the input video width and height
-
-
-out_w, ow
-out_h, oh
-the output width and height, that is the size of the padded area as
-specified by the width and height expressions
-
-
-rotw(a)
-roth(a)
-the minimal width/height required for completely containing the input
-video rotated by a radians.
-
-These are only available when computing the out_w and
-out_h expressions.
-
-
-
-
-
30.77.1 Examples# TOC
-
-
- Rotate the input by PI/6 radians clockwise:
-
-
rotate=PI/6
-
-
- Rotate the input by PI/6 radians counter-clockwise:
-
-
rotate=-PI/6
-
-
- Rotate the input by 45 degrees clockwise:
-
-
rotate=45*PI/180
-
-
- Apply a constant rotation with period T, starting from an angle of PI/3:
-
-
rotate=PI/3+2*PI*t/T
-
-
- Make the input video rotation oscillating with a period of T
-seconds and an amplitude of A radians:
-
-
rotate=A*sin(2*PI/T*t)
-
-
- Rotate the video, output size is chosen so that the whole rotating
-input video is always completely contained in the output:
-
-
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-
-
- Rotate the video, reduce the output size so that no background is ever
-shown:
-
-
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-
-
-
-
-
30.77.2 Commands# TOC
-
-
The filter supports the following commands:
-
-
-a, angle
-Set the angle expression.
-The command accepts the same syntax of the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-
-
-
-
-
30.78 sab# TOC
-
-
Apply Shape Adaptive Blur.
-
-
The filter accepts the following options:
-
-
-luma_radius, lr
-Set luma blur filter strength, must be a value in range 0.1-4.0, default
-value is 1.0. A greater value will result in a more blurred image, and
-in slower processing.
-
-
-luma_pre_filter_radius, lpfr
-Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
-value is 1.0.
-
-
-luma_strength, ls
-Set luma maximum difference between pixels to still be considered, must
-be a value in the 0.1-100.0 range, default value is 1.0.
-
-
-chroma_radius, cr
-Set chroma blur filter strength, must be a value in range 0.1-4.0. A
-greater value will result in a more blurred image, and in slower
-processing.
-
-
-chroma_pre_filter_radius, cpfr
-Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
-
-
-chroma_strength, cs
-Set chroma maximum difference between pixels to still be considered,
-must be a value in the 0.1-100.0 range.
-
-
-
-
Each chroma option value, if not explicitly specified, is set to the
-corresponding luma option value.
-
-
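-
-For illustration only (placeholder file names, arbitrary strengths):
-
-
ffmpeg -i input.mp4 -vf sab=lr=2.0:ls=4.0 output.mp4
-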
-
30.79 scale# TOC
-
-
Scale (resize) the input video, using the libswscale library.
-
-
The scale filter forces the output display aspect ratio to be the same
-of the input, by changing the output sample aspect ratio.
-
-
If the input image format is different from the format requested by
-the next filter, the scale filter will convert the input to the
-requested format.
-
-
-
30.79.1 Options# TOC
-
The filter accepts the following options, or any of the options
-supported by the libswscale scaler.
-
-
See (ffmpeg-scaler)the ffmpeg-scaler manual for
-the complete list of scaler options.
-
-
-width, w
-height, h
-Set the output video dimension expression. Default value is the input
-dimension.
-
-If the value is 0, the input width is used for the output.
-
-If one of the values is -1, the scale filter will use a value that
-maintains the aspect ratio of the input image, calculated from the
-other specified dimension. If both of them are -1, the input size is
-used.
-
-If one of the values is -n with n > 1, the scale filter will also use a value
-that maintains the aspect ratio of the input image, calculated from the other
-specified dimension. After that it will, however, make sure that the calculated
-dimension is divisible by n and adjust the value if necessary.
-
-See below for the list of accepted constants for use in the dimension
-expression.
-
-
-interl
-Set the interlacing mode. It accepts the following values:
-
-
-‘1 ’
-Force interlaced aware scaling.
-
-
-‘0 ’
-Do not apply interlaced scaling.
-
-
-‘-1 ’
-Select interlaced aware scaling depending on whether the source frames
-are flagged as interlaced or not.
-
-
-
-Default value is ‘0 ’.
-
-
-flags
-Set libswscale scaling flags. See
-(ffmpeg-scaler)the ffmpeg-scaler manual for the
-complete list of values. If not explicitly specified the filter applies
-the default flags.
-
-
-size, s
-Set the video size. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-in_color_matrix
-out_color_matrix
-Set in/output YCbCr color space type.
-
-This allows the autodetected value to be overridden, and also allows forcing
-a specific value to be used for the output and encoder.
-
-If not specified, the color space type depends on the pixel format.
-
-Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘bt709 ’
-Format conforming to International Telecommunication Union (ITU)
-Recommendation BT.709.
-
-
-‘fcc ’
-Set color space conforming to the United States Federal Communications
-Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
-
-
-‘bt601 ’
-Set color space conforming to:
-
-
- ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
-
- ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
-
- Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
-
-
-
-
-‘smpte240m ’
-Set color space conforming to SMPTE ST 240:1999.
-
-
-
-
-in_range
-out_range
-Set in/output YCbCr sample range.
-
-This allows the autodetected value to be overridden, and also allows forcing
-a specific value to be used for the output and encoder. If not specified, the
-range depends on the pixel format. Possible values:
-
-
-‘auto ’
-Choose automatically.
-
-
-‘jpeg/full/pc ’
-Set full range (0-255 in case of 8-bit luma).
-
-
-‘mpeg/tv ’
-Set "MPEG" range (16-235 in case of 8-bit luma).
-
-
-
-
-force_original_aspect_ratio
-Enable decreasing or increasing output video width or height if necessary to
-keep the original aspect ratio. Possible values:
-
-
-‘disable ’
-Scale the video as specified and disable this feature.
-
-
-‘decrease ’
-The output video dimensions will automatically be decreased if needed.
-
-
-‘increase ’
-The output video dimensions will automatically be increased if needed.
-
-
-
-
-One useful instance of this option is that when you know a specific device’s
-maximum allowed resolution, you can use this to limit the output video to
-that, while retaining the aspect ratio. For example, device A allows
-1280x720 playback, and your video is 1920x800. Using this option (set it to
-decrease) and specifying 1280x720 to the command line makes the output
-1280x533.
-
-Please note that this is different from specifying -1 for w
-or h ; you still need to specify the output resolution for this option
-to work.
-
-
-
-
-
The values of the w and h options are expressions
-containing the following constants:
-
-
-in_w
-in_h
-The input width and height
-
-
-iw
-ih
-These are the same as in_w and in_h .
-
-
-out_w
-out_h
-The output (scaled) width and height
-
-
-ow
-oh
-These are the same as out_w and out_h
-
-
-a
-The same as iw / ih
-
-
-sar
-input sample aspect ratio
-
-
-dar
-The input display aspect ratio. Calculated from (iw / ih) * sar
.
-
-
-hsub
-vsub
-horizontal and vertical input chroma subsample values. For example for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-ohsub
-ovsub
-horizontal and vertical output chroma subsample values. For example for the
-pixel format "yuv422p" ohsub is 2 and ovsub is 1.
-
-
-
-
-
30.79.2 Examples# TOC
-
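-
- As an illustrative sketch (this example is not part of the original manual),
-scale the input to half its width while preserving the aspect ratio:
-
scale=w=iw/2:h=-1
-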
-
-
-
-
30.80 separatefields# TOC
-
-
The separatefields
takes a frame-based video input and splits
-each frame into its component fields, producing a new half height clip
-with twice the frame rate and twice the frame count.
-
-
This filter uses the field-dominance information in the frame to decide which
-of each pair of fields to place first in the output.
-If it gets it wrong, use the setfield filter before the separatefields
filter.
-
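-
-As an illustrative sketch (file names are placeholders), a top-field-first
-source could be marked explicitly and then split into fields with:
-
ffmpeg -i interlaced.mkv -vf setfield=tff,separatefields fields.mkv
-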
-
-
30.81 setdar, setsar# TOC
-
-
The setdar
filter sets the Display Aspect Ratio for the filter
-output video.
-
-
This is done by changing the specified Sample (aka Pixel) Aspect
-Ratio, according to the following equation:
-
-
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
-
-
-
Keep in mind that the setdar
filter does not modify the pixel
-dimensions of the video frame. Also, the display aspect ratio set by
-this filter may be changed by later filters in the filterchain,
-e.g. in case of scaling or if another "setdar" or a "setsar" filter is
-applied.
-
-
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
-the filter output video.
-
-
Note that as a consequence of the application of this filter, the
-output display aspect ratio will change according to the equation
-above.
-
-
Keep in mind that the sample aspect ratio set by the setsar
-filter may be changed by later filters in the filterchain, e.g. if
-another "setsar" or a "setdar" filter is applied.
-
-
It accepts the following parameters:
-
-
-r, ratio, dar (setdar
only), sar (setsar
only)
-Set the aspect ratio used by the filter.
-
-The parameter can be a floating point number string, an expression, or
-a string of the form num :den , where num and
-den are the numerator and denominator of the aspect ratio. If
-the parameter is not specified, the value "0" is assumed.
-In case the form "num :den " is used, the :
character
-should be escaped.
-
-
-max
-Set the maximum integer value to use for expressing numerator and
-denominator when reducing the expressed aspect ratio to a rational.
-Default value is 100
.
-
-
-
-
-
The parameter sar is an expression containing
-the following constants:
-
-
-E, PI, PHI
-These are approximated values for the mathematical constants e
-(Euler’s number), pi (Greek pi), and phi (the golden ratio).
-
-
-w, h
-The input width and height.
-
-
-a
-These are the same as w / h .
-
-
-sar
-The input sample aspect ratio.
-
-
-dar
-The input display aspect ratio. It is the same as
-(w / h ) * sar .
-
-
-hsub, vsub
-Horizontal and vertical chroma subsample values. For example, for the
-pixel format "yuv422p" hsub is 2 and vsub is 1.
-
-
-
-
-
30.81.1 Examples# TOC
-
-
- To change the display aspect ratio to 16:9, specify one of the following:
-
-
setdar=dar=1.77777
-setdar=dar=16/9
-setdar=dar=1.77777
-
-
- To change the sample aspect ratio to 10:11, specify:
-
setsar=sar=10/11
-
- To set a display aspect ratio of 16:9, and specify a maximum integer value of
-1000 in the aspect ratio reduction, use the command:
-
-
setdar=ratio=16/9:max=1000
-
-
-
-
-
-
30.82 setfield# TOC
-
-
Force field for the output video frame.
-
-
The setfield
filter marks the interlace type field for the
-output frames. It does not change the input frame, but only sets the
-corresponding property, which affects how the frame is treated by
-following filters (e.g. fieldorder
or yadif
).
-
-
The filter accepts the following options:
-
-
-mode
-Available values are:
-
-
-‘auto ’
-Keep the same field property.
-
-
-‘bff ’
-Mark the frame as bottom-field-first.
-
-
-‘tff ’
-Mark the frame as top-field-first.
-
-
-‘prog ’
-Mark the frame as progressive.
-
-
-
-
-
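-
-As an illustrative sketch (file names are placeholders), the following marks
-the input as top-field-first before deinterlacing it with yadif:
-
ffmpeg -i input.mkv -vf setfield=mode=tff,yadif output.mkv
-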
-
-
30.83 showinfo# TOC
-
-
Show a line containing various information for each input video frame.
-The input video is not modified.
-
-
The shown line contains a sequence of key/value pairs of the form
-key :value .
-
-
The following values are shown in the output:
-
-
-n
-The (sequential) number of the input frame, starting from 0.
-
-
-pts
-The Presentation TimeStamp of the input frame, expressed as a number of
-time base units. The time base unit depends on the filter input pad.
-
-
-pts_time
-The Presentation TimeStamp of the input frame, expressed as a number of
-seconds.
-
-
-pos
-The position of the frame in the input stream, or -1 if this information is
-unavailable and/or meaningless (for example in case of synthetic video).
-
-
-fmt
-The pixel format name.
-
-
-sar
-The sample aspect ratio of the input frame, expressed in the form
-num /den .
-
-
-s
-The size of the input frame. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual.
-
-
-i
-The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
-for bottom field first).
-
-
-iskey
-This is 1 if the frame is a key frame, 0 otherwise.
-
-
-type
-The picture type of the input frame ("I" for an I-frame, "P" for a
-P-frame, "B" for a B-frame, or "?" for an unknown type).
-Also refer to the documentation of the AVPictureType
enum and of
-the av_get_picture_type_char
function defined in
-libavutil/avutil.h .
-
-
-checksum
-The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
-
-
-plane_checksum
-The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
-expressed in the form "[c0 c1 c2 c3 ]".
-
-
-
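-
-As an illustrative sketch (the file name is a placeholder), the per-frame
-information can be logged without writing any output file:
-
ffmpeg -i input.mkv -vf showinfo -f null -
-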
-
-
30.84 shuffleplanes# TOC
-
-
Reorder and/or duplicate video planes.
-
-
It accepts the following parameters:
-
-
-map0
-The index of the input plane to be used as the first output plane.
-
-
-map1
-The index of the input plane to be used as the second output plane.
-
-
-map2
-The index of the input plane to be used as the third output plane.
-
-
-map3
-The index of the input plane to be used as the fourth output plane.
-
-
-
-
-
The first plane has the index 0. The default is to keep the input unchanged.
-
-
Swap the second and third planes of the input:
-
-
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
-
-
-
-
30.85 signalstats# TOC
-
Evaluate various visual metrics that assist in determining issues associated
-with the digitization of analog video media.
-
-
By default the filter will log these metadata values:
-
-
-YMIN
-Display the minimal Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-YLOW
-Display the Y value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YAVG
-Display the average Y value within the input frame. Expressed in range of
-[0-255].
-
-
-YHIGH
-Display the Y value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-YMAX
-Display the maximum Y value contained within the input frame. Expressed in
-range of [0-255].
-
-
-UMIN
-Display the minimal U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-ULOW
-Display the U value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UAVG
-Display the average U value within the input frame. Expressed in range of
-[0-255].
-
-
-UHIGH
-Display the U value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-UMAX
-Display the maximum U value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VMIN
-Display the minimal V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-VLOW
-Display the V value at the 10% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VAVG
-Display the average V value within the input frame. Expressed in range of
-[0-255].
-
-
-VHIGH
-Display the V value at the 90% percentile within the input frame. Expressed in
-range of [0-255].
-
-
-VMAX
-Display the maximum V value contained within the input frame. Expressed in
-range of [0-255].
-
-
-SATMIN
-Display the minimal saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATLOW
-Display the saturation value at the 10% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATAVG
-Display the average saturation value within the input frame. Expressed in range
-of [0-~181.02].
-
-
-SATHIGH
-Display the saturation value at the 90% percentile within the input frame.
-Expressed in range of [0-~181.02].
-
-
-SATMAX
-Display the maximum saturation value contained within the input frame.
-Expressed in range of [0-~181.02].
-
-
-HUEMED
-Display the median value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-HUEAVG
-Display the average value for hue within the input frame. Expressed in range of
-[0-360].
-
-
-YDIF
-Display the average of sample value difference between all values of the Y
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-UDIF
-Display the average of sample value difference between all values of the U
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-VDIF
-Display the average of sample value difference between all values of the V
-plane in the current frame and corresponding values of the previous input frame.
-Expressed in range of [0-255].
-
-
-
-
The filter accepts the following options:
-
-
-stat
-out
-
-stat specifies an additional form of image analysis.
-out outputs the video with the specified type of pixel highlighted.
-
-Both options accept the following values:
-
-
-‘tout ’
-Identify temporal outlier pixels. A temporal outlier is a pixel
-unlike the neighboring pixels of the same field. Examples of temporal outliers
-include the results of video dropouts, head clogs, or tape tracking issues.
-
-
-‘vrep ’
-Identify vertical line repetition. Vertical line repetition includes
-similar rows of pixels within a frame. In born-digital video vertical line
-repetition is common, but this pattern is uncommon in video digitized from an
-analog source. When it occurs in video that results from the digitization of an
-analog source it can indicate concealment from a dropout compensator.
-
-
-‘brng ’
-Identify pixels that fall outside of legal broadcast range.
-
-
-
-
-color, c
-Set the highlight color for the out option. The default color is
-yellow.
-
-
-
-
-
30.85.1 Examples# TOC
-
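-
- As an illustrative sketch (the file name is a placeholder; this example is
-not from the original manual), play a file while highlighting pixels outside
-of broadcast range in red:
-
ffplay input.mov -vf signalstats=out=brng:color=red
-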
-
-
-
-
30.86 smartblur# TOC
-
-
Blur the input video without impacting the outlines.
-
-
It accepts the following options:
-
-
-luma_radius, lr
-Set the luma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-luma_strength, ls
-Set the luma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-luma_threshold, lt
-Set the luma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-chroma_radius, cr
-Set the chroma radius. The option value must be a float number in
-the range [0.1,5.0] that specifies the variance of the gaussian filter
-used to blur the image (slower if larger). Default value is 1.0.
-
-
-chroma_strength, cs
-Set the chroma strength. The option value must be a float number
-in the range [-1.0,1.0] that configures the blurring. A value included
-in [0.0,1.0] will blur the image whereas a value included in
-[-1.0,0.0] will sharpen the image. Default value is 1.0.
-
-
-chroma_threshold, ct
-Set the chroma threshold used as a coefficient to determine
-whether a pixel should be blurred or not. The option value must be an
-integer in the range [-30,30]. A value of 0 will filter all the image,
-a value included in [0,30] will filter flat areas and a value included
-in [-30,0] will filter edges. Default value is 0.
-
-
-
-
If a chroma option is not explicitly set, the corresponding luma value
-is set.
-
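-
-For example (an illustrative sketch with arbitrary values, not taken from the
-original manual), a mild edge-preserving blur could be applied with:
-
smartblur=lr=2.5:ls=0.6:lt=15
-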
-
-
30.87 stereo3d# TOC
-
-
Convert between different stereoscopic image formats.
-
-
The filters accept the following options:
-
-
-in
-Set stereoscopic image format of input.
-
-Available values for input image formats are:
-
-‘sbsl ’
-side by side parallel (left eye left, right eye right)
-
-
-‘sbsr ’
-side by side crosseye (right eye left, left eye right)
-
-
-‘sbs2l ’
-side by side parallel with half width resolution
-(left eye left, right eye right)
-
-
-‘sbs2r ’
-side by side crosseye with half width resolution
-(right eye left, left eye right)
-
-
-‘abl ’
-above-below (left eye above, right eye below)
-
-
-‘abr ’
-above-below (right eye above, left eye below)
-
-
-‘ab2l ’
-above-below with half height resolution
-(left eye above, right eye below)
-
-
-‘ab2r ’
-above-below with half height resolution
-(right eye above, left eye below)
-
-
-‘al ’
-alternating frames (left eye first, right eye second)
-
-
-‘ar ’
-alternating frames (right eye first, left eye second)
-
-Default value is ‘sbsl ’.
-
-
-
-
-out
-Set stereoscopic image format of output.
-
-Available values for output image formats are all the input formats as well as:
-
-‘arbg ’
-anaglyph red/blue gray
-(red filter on left eye, blue filter on right eye)
-
-
-‘argg ’
-anaglyph red/green gray
-(red filter on left eye, green filter on right eye)
-
-
-‘arcg ’
-anaglyph red/cyan gray
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arch ’
-anaglyph red/cyan half colored
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcc ’
-anaglyph red/cyan color
-(red filter on left eye, cyan filter on right eye)
-
-
-‘arcd ’
-anaglyph red/cyan color optimized with the least squares projection of dubois
-(red filter on left eye, cyan filter on right eye)
-
-
-‘agmg ’
-anaglyph green/magenta gray
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmh ’
-anaglyph green/magenta half colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmc ’
-anaglyph green/magenta colored
-(green filter on left eye, magenta filter on right eye)
-
-
-‘agmd ’
-anaglyph green/magenta color optimized with the least squares projection of dubois
-(green filter on left eye, magenta filter on right eye)
-
-
-‘aybg ’
-anaglyph yellow/blue gray
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybh ’
-anaglyph yellow/blue half colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybc ’
-anaglyph yellow/blue colored
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘aybd ’
-anaglyph yellow/blue color optimized with the least squares projection of dubois
-(yellow filter on left eye, blue filter on right eye)
-
-
-‘irl ’
-interleaved rows (left eye has top row, right eye starts on next row)
-
-
-‘irr ’
-interleaved rows (right eye has top row, left eye starts on next row)
-
-
-‘ml ’
-mono output (left eye only)
-
-
-‘mr ’
-mono output (right eye only)
-
-
-
-Default value is ‘arcd ’.
-
-
-
-
-
30.87.1 Examples# TOC
-
-
- Convert input video from side by side parallel to anaglyph yellow/blue dubois:
-
stereo3d=sbsl:aybd
-
- Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
-
stereo3d=abl:sbsr
-
-
-
-
30.88 spp# TOC
-
-
Apply a simple postprocessing filter that compresses and decompresses the image
-at several (or - in the case of quality level 6
- all) shifts
-and averages the results.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-6. If set to 0
, the filter will have no
-effect. A value of 6
means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3
.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-mode
-Set thresholding mode. Available modes are:
-
-
-‘hard ’
-Set hard thresholding (default).
-
-‘soft ’
-Set soft thresholding (better de-ringing effect, but likely blurrier).
-
-
-
-
-use_bframe_qp
-Enable the use of the QP from the B-Frames if set to 1
. Using this
-option may cause flicker since the B-Frames have often larger QP. Default is
-0
(not enabled).
-
-
-
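-
-As an illustrative sketch (values chosen arbitrarily), a higher quality level
-with soft thresholding could be requested with:
-
spp=quality=6:mode=soft
-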
-
-
30.89 subtitles# TOC
-
-
Draw subtitles on top of input video using the libass library.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libass
. This filter also requires a build with libavcodec and
-libavformat to convert the passed subtitles file to ASS (Advanced Substation
-Alpha) subtitles format.
-
-
The filter accepts the following options:
-
-
-filename, f
-Set the filename of the subtitle file to read. It must be specified.
-
-
-original_size
-Specify the size of the original video, the video for which the ASS file
-was composed. For the syntax of this option, check the "Video size" section in
-the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
-this is necessary to correctly scale the fonts if the aspect ratio has been
-changed.
-
-
-charenc
-Set subtitles input character encoding. subtitles
filter only. Only
-useful if not UTF-8.
-
-
-stream_index, si
-Set subtitles stream index. subtitles
filter only.
-
-
-
-
If the first key is not specified, it is assumed that the first value
-specifies the filename .
-
-
For example, to render the file sub.srt on top of the input
-video, use the command:
-
subtitles=sub.srt
-
which is equivalent to:
-
-
subtitles=filename=sub.srt
-
-
-
To render the default subtitles stream from file video.mkv , use:
-
subtitles=video.mkv
-
To render the second subtitles stream from that file, use:
-
-
subtitles=video.mkv:si=1
-
-
-
-
30.90 super2xsai# TOC
-
-
Scale the input by 2x and smooth using the Super2xSaI (Scale and
-Interpolate) pixel art scaling algorithm.
-
-
Useful for enlarging pixel art images without reducing sharpness.
-
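-
-As an illustrative sketch (file names are placeholders), a pixel-art image
-could be enlarged with:
-
ffmpeg -i sprite.png -vf super2xsai sprite_2x.png
-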
-
-
30.91 swapuv# TOC
-
Swap U & V planes.
-
-
-
30.92 telecine# TOC
-
-
Apply telecine process to the video.
-
-
This filter accepts the following options:
-
-
-first_field
-
-‘top, t ’
-top field first
-
-‘bottom, b ’
-bottom field first
-The default value is top
.
-
-
-
-
-pattern
-A string of numbers representing the pulldown pattern you wish to apply.
-The default value is 23
.
-
-
-
-
-
Some typical patterns:
-
-NTSC output (30i):
-27.5p: 32222
-24p: 23 (classic)
-24p: 2332 (preferred)
-20p: 33
-18p: 334
-16p: 3444
-
-PAL output (25i):
-27.5p: 12222
-24p: 222222222223 ("Euro pulldown")
-16.67p: 33
-16p: 33333334
-
-
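-
-As an illustrative sketch (file names are placeholders), applying the classic
-2:3 pulldown to 24 fps material for NTSC output could look like:
-
ffmpeg -i film_24p.mkv -vf telecine=first_field=top:pattern=23 out_30i.mkv
-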
-
-
30.93 thumbnail# TOC
-
Select the most representative frame in a given sequence of consecutive frames.
-
-
The filter accepts the following options:
-
-
-n
-Set the frames batch size to analyze; in a set of n frames, the filter
-will pick one of them, and then handle the next batch of n frames until
-the end. Default is 100
.
-
-
-
-
Since the filter keeps track of the whole frame sequence, a bigger n
-value will result in a higher memory usage, so a high value is not recommended.
-
-
-
30.93.1 Examples# TOC
-
-
- Extract one picture every 50 frames:
-
thumbnail=50
-
- Complete example of a thumbnail creation with ffmpeg
:
-
-
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
-
-
-
-
-
30.94 tile# TOC
-
-
Tile several successive frames together.
-
-
The filter accepts the following options:
-
-
-layout
-Set the grid size (i.e. the number of lines and columns). For the syntax of
-this option, check the "Video size" section in the ffmpeg-utils manual.
-
-
-nb_frames
-Set the maximum number of frames to render in the given area. It must be less
-than or equal to w xh . The default value is 0
, meaning all
-the area will be used.
-
-
-margin
-Set the outer border margin in pixels.
-
-
-padding
-Set the inner border thickness (i.e. the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-
-color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of color
-is "black".
-
-
-
-
-
30.94.1 Examples# TOC
-
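-
- As an illustrative sketch (file names and sizes are placeholders; this is not
-an example from the original manual), build a single 4x3 mosaic of downscaled
-frames:
-
ffmpeg -i input.mkv -vf "scale=160:90,tile=layout=4x3:padding=4:margin=8" -frames:v 1 mosaic.png
-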
-
-
-
-
30.95 tinterlace# TOC
-
-
Perform various types of temporal field interlacing.
-
-
Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-
The filter accepts the following options:
-
-
-mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-
-‘merge, 0 ’
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-‘drop_odd, 1 ’
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
- 22222 44444
- 22222 44444
- 22222 44444
- 22222 44444
-
-
-
-‘drop_even, 2 ’
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 33333
-11111 33333
-11111 33333
-11111 33333
-
-
-
-‘pad, 3 ’
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-11111 22222 33333 44444
-
-Output:
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-11111 ..... 33333 .....
-..... 22222 ..... 44444
-
-
-
-
-‘interleave_top, 4 ’
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-
-Output:
-11111 33333
-22222 44444
-11111 33333
-22222 44444
-
-
-
-
-‘interleave_bottom, 5 ’
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-11111 22222<- 33333 44444<-
-11111<- 22222 33333<- 44444
-
-Output:
-22222 44444
-11111 33333
-22222 44444
-11111 33333
-
-
-
-
-‘interlacex2, 6 ’
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-
-
-
------> time
-Input:
-Frame 1 Frame 2 Frame 3 Frame 4
-
-11111 22222 33333 44444
- 11111 22222 33333 44444
-11111 22222 33333 44444
- 11111 22222 33333 44444
-
-Output:
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-11111 22222 22222 33333 33333 44444 44444
- 11111 11111 22222 22222 33333 33333 44444
-
-
-
-
-
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is merge
.
-
-
-flags
-Specify flags influencing the filter process.
-
-Available value for flags is:
-
-
-low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace ’twitter’ and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for mode
-interleave_top and interleave_bottom .
-
-
-
-
-
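-
-As an illustrative sketch (file names are placeholders), interlacing a
-progressive source with vertical low-pass filtering enabled:
-
ffmpeg -i progressive.mkv -vf tinterlace=mode=interleave_top:flags=low_pass_filter interlaced.mkv
-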
-
-
-
30.96 transpose# TOC
-
-
Transpose rows with columns in the input video and optionally flip it.
-
-
It accepts the following parameters:
-
-
-dir
-Specify the transposition direction.
-
-Can assume the following values:
-
-‘0, 4, cclock_flip ’
-Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
-
-
L.R L.l
-. . -> . .
-l.r R.r
-
-
-
-‘1, 5, clock ’
-Rotate by 90 degrees clockwise, that is:
-
-
L.R l.L
-. . -> . .
-l.r r.R
-
-
-
-‘2, 6, cclock ’
-Rotate by 90 degrees counterclockwise, that is:
-
-
L.R R.r
-. . -> . .
-l.r L.l
-
-
-
-‘3, 7, clock_flip ’
-Rotate by 90 degrees clockwise and vertically flip, that is:
-
-
L.R r.R
-. . -> . .
-l.r l.L
-
-
-
-
-For values between 4-7, the transposition is only done if the input
-video geometry is portrait and not landscape. These values are
-deprecated, the passthrough
option should be used instead.
-
-Numerical values are deprecated, and should be dropped in favor of
-symbolic constants.
-
-
-passthrough
-Do not apply the transposition if the input geometry matches the one
-specified by the specified value. It accepts the following values:
-
-‘none ’
-Always apply transposition.
-
-‘portrait ’
-Preserve portrait geometry (when height >= width ).
-
-‘landscape ’
-Preserve landscape geometry (when width >= height ).
-
-
-
-Default value is none
.
-
-
-
-
For example to rotate by 90 degrees clockwise and preserve portrait
-layout:
-
-
transpose=dir=1:passthrough=portrait
-
-
-
The command above can also be specified as:
-
transpose=1:portrait
-
-
30.97 trim# TOC
-
Trim the input so that the output contains one continuous subpart of the input.
-
-
It accepts the following parameters:
-
-start
-Specify the time of the start of the kept section, i.e. the frame with the
-timestamp start will be the first frame in the output.
-
-
-end
-Specify the time of the first frame that will be dropped, i.e. the frame
-immediately preceding the one with the timestamp end will be the last
-frame in the output.
-
-
-start_pts
-This is the same as start , except this option sets the start timestamp
-in timebase units instead of seconds.
-
-
-end_pts
-This is the same as end , except this option sets the end timestamp
-in timebase units instead of seconds.
-
-
-duration
-The maximum duration of the output in seconds.
-
-
-start_frame
-The number of the first frame that should be passed to the output.
-
-
-end_frame
-The number of the first frame that should be dropped.
-
-
-
-
start , end , and duration are expressed as time
-duration specifications; see
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-
Note that the first two sets of the start/end options and the duration
-option look at the frame timestamp, while the _frame variants simply count the
-frames that pass through the filter. Also note that this filter does not modify
-the timestamps. If you wish for the output timestamps to start at zero, insert a
-setpts filter after the trim filter.
-
-
If multiple start or end options are set, this filter tries to be greedy and
-keep all the frames that match at least one of the specified constraints. To keep
-only the part that matches all the constraints at once, chain multiple trim
-filters.
-
-
The defaults are such that all the input is kept. So it is possible to set e.g.
-just the end values to keep everything before the specified time.
-
-
Examples:
-
- Drop everything except the second minute of input:
-
-
ffmpeg -i INPUT -vf trim=60:120
-
-
- Keep only the first second:
-
-
ffmpeg -i INPUT -vf trim=duration=1
-
-
-
-
-
-
-
30.98 unsharp# TOC
-
-
Sharpen or blur the input video.
-
-
It accepts the following parameters:
-
-
-luma_msize_x, lx
-Set the luma matrix horizontal size. It must be an odd integer between
-3 and 63. The default value is 5.
-
-
-luma_msize_y, ly
-Set the luma matrix vertical size. It must be an odd integer between 3
-and 63. The default value is 5.
-
-
-luma_amount, la
-Set the luma effect strength. It must be a floating point number, reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 1.0.
-
-
-chroma_msize_x, cx
-Set the chroma matrix horizontal size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_msize_y, cy
-Set the chroma matrix vertical size. It must be an odd integer
-between 3 and 63. The default value is 5.
-
-
-chroma_amount, ca
-Set the chroma effect strength. It must be a floating point number, reasonable
-values lie between -1.5 and 1.5.
-
-Negative values will blur the input video, while positive values will
-sharpen it, a value of zero will disable the effect.
-
-Default value is 0.0.
-
-
-opencl
-If set to 1, specify using OpenCL capabilities, only available if
-FFmpeg was configured with --enable-opencl
. Default value is 0.
-
-
-
-
-
All parameters are optional and default to the equivalent of the
-string ’5:5:1.0:5:5:0.0’.
-
-
-
30.98.1 Examples# TOC
-
-
- Apply strong luma sharpen effect:
-
-
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
-
-
- Apply a strong blur of both luma and chroma parameters:
-
unsharp=7:7:-2:7:7:-2
-
-
-
-
30.99 uspp# TOC
-
-
Apply ultra slow/simple postprocessing filter that compresses and decompresses
-the image at several (or - in the case of quality level 8
- all)
-shifts and averages the results.
-
-
The way this differs from the behavior of spp is that uspp actually encodes &
-decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
-DCT similar to MJPEG.
-
-
The filter accepts the following options:
-
-
-quality
-Set quality. This option defines the number of levels for averaging. It accepts
-an integer in the range 0-8. If set to 0
, the filter will have no
-effect. A value of 8
means the highest quality. For each increment of
-that value the speed drops by a factor of approximately 2. Default value is
-3
.
-
-
-qp
-Force a constant quantization parameter. If not set, the filter will use the QP
-from the video stream (if available).
-
-
-
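-
-As an illustrative sketch (the values are arbitrary), the slowest, highest
-quality setting with a forced quantization parameter could be requested with:
-
uspp=quality=8:qp=10
-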
-
-
30.100 vidstabdetect# TOC
-
-
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
-vidstabtransform for pass 2.
-
-
This filter generates a file with relative translation and rotation
-transform information about subsequent frames, which is then used by
-the vidstabtransform filter.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
This filter accepts the following options:
-
-
-result
-Set the path to the file used to write the transforms information.
-Default value is transforms.trf .
-
-
-shakiness
-Set how shaky the video is and how quick the camera is. It accepts an
-integer in the range 1-10, a value of 1 means little shakiness, a
-value of 10 means strong shakiness. Default value is 5.
-
-
-accuracy
-Set the accuracy of the detection process. It must be a value in the
-range 1-15. A value of 1 means low accuracy, a value of 15 means high
-accuracy. Default value is 15.
-
-
-stepsize
-Set stepsize of the search process. The region around minimum is
-scanned with 1 pixel resolution. Default value is 6.
-
-
-mincontrast
-Set minimum contrast. Below this value a local measurement field is
-discarded. Must be a floating point value in the range 0-1. Default
-value is 0.3.
-
-
-tripod
-Set reference frame number for tripod mode.
-
-If enabled, the motion of the frames is compared to a reference frame
-in the filtered stream, identified by the specified number. The idea
-is to compensate all movements in a more-or-less static scene and keep
-the camera view absolutely still.
-
-If set to 0, it is disabled. The frames are counted starting from 1.
-
-
-show
-Show fields and transforms in the resulting frames. It accepts an
-integer in the range 0-2. Default value is 0, which disables any
-visualization.
-
-
-
-
-
30.100.1 Examples# TOC
-
-
- Use default values:
-
vidstabdetect
-
- Analyze strongly shaky movie and put the results in file
-mytransforms.trf :
-
-
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
-
-
- Visualize the result of internal transformations in the resulting
-video:
-
vidstabdetect=show=1
-
- Analyze a video with medium shakiness using ffmpeg
:
-
-
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
-
-
-
-
-
30.101 vidstabtransform# TOC
-
-
Video stabilization/deshaking: pass 2 of 2,
-see vidstabdetect for pass 1.
-
-
Read a file with transform information for each frame and
-apply/compensate them. Together with the vidstabdetect
-filter this can be used to deshake videos. See also
-http://public.hronopik.de/vid.stab . It is important to also use
-the unsharp filter, see below.
-
-
To enable compilation of this filter you need to configure FFmpeg with
---enable-libvidstab
.
-
-
-
30.101.1 Options# TOC
-
-
-input
-Set path to the file used to read the transforms. Default value is
-transforms.trf .
-
-
-smoothing
-Set the number of frames (value*2 + 1) used for lowpass filtering the
-camera movements. Default value is 10.
-
-For example a number of 10 means that 21 frames are used (10 in the
-past and 10 in the future) to smooth the motion in the video. A
-larger value leads to a smoother video, but limits the acceleration of
-the camera (pan/tilt movements). 0 is a special case where a static
-camera is simulated.
-
-
-optalgo
-Set the camera path optimization algorithm.
-
-Accepted values are:
-
-‘gauss ’
-gaussian kernel low-pass filter on camera motion (default)
-
-‘avg ’
-averaging on transformations
-
-
-
-
-maxshift
-Set maximal number of pixels to translate frames. Default value is -1,
-meaning no limit.
-
-
-maxangle
-Set maximal angle in radians (degree*PI/180) to rotate frames. Default
-value is -1, meaning no limit.
-
-
-crop
-Specify how to deal with borders that may be visible due to movement
-compensation.
-
-Available values are:
-
-‘keep ’
-keep image information from previous frame (default)
-
-‘black ’
-fill the border black
-
-
-
-
-invert
-Invert transforms if set to 1. Default value is 0.
-
-
-relative
-Consider transforms as relative to previous frame if set to 1,
-absolute if set to 0. Default value is 0.
-
-
-zoom
-Set percentage to zoom. A positive value will result in a zoom-in
-effect, a negative value in a zoom-out effect. Default value is 0 (no
-zoom).
-
-
-optzoom
-Set optimal zooming to avoid borders.
-
-Accepted values are:
-
-‘0 ’
-disabled
-
-‘1 ’
-optimal static zoom value is determined (only very strong movements
-will lead to visible borders) (default)
-
-‘2 ’
-optimal adaptive zoom value is determined (no borders will be
-visible), see zoomspeed
-
-
-
-Note that the value given at zoom is added to the one calculated here.
-
-
-zoomspeed
-Set percent to zoom maximally each frame (enabled when
-optzoom is set to 2). Range is from 0 to 5, default value is
-0.25.
-
-
-interpol
-Specify type of interpolation.
-
-Available values are:
-
-‘no ’
-no interpolation
-
-‘linear ’
-linear only horizontal
-
-‘bilinear ’
-linear in both directions (default)
-
-‘bicubic ’
-cubic in both directions (slow)
-
-
-
-
-tripod
-Enable virtual tripod mode if set to 1, which is equivalent to
-relative=0:smoothing=0
. Default value is 0.
-
-Use also tripod
option of vidstabdetect .
-
-
-debug
-Increase log verbosity if set to 1. Also the detected global motions
-are written to the temporary file global_motions.trf . Default
-value is 0.
-
-
-
-
-
30.101.2 Examples# TOC
-
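-
- As an illustrative sketch (file names are placeholders), apply the transforms
-produced by vidstabdetect and then sharpen, as suggested above:
-
ffmpeg -i input.mkv -vf vidstabtransform=smoothing=30:input=mytransforms.trf,unsharp=5:5:0.8:3:3:0.4 stabilized.mkv
-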
-
-
-
-
30.102 vflip# TOC
-
-
Flip the input video vertically.
-
-
For example, to vertically flip a video with ffmpeg
:
-
-
ffmpeg -i in.avi -vf "vflip" out.avi
-
-
-
-
30.103 vignette# TOC
-
-
Make or reverse a natural vignetting effect.
-
-
The filter accepts the following options:
-
-
-angle, a
-Set lens angle expression as a number of radians.
-
-The value is clipped in the [0,PI/2]
range.
-
-Default value: "PI/5"
-
-
-x0
-y0
-Set center coordinates expressions. Respectively "w/2"
and "h/2"
-by default.
-
-
-mode
-Set forward/backward mode.
-
-Available modes are:
-
-‘forward ’
-The larger the distance from the central point, the darker the image becomes.
-
-
-‘backward ’
-The larger the distance from the central point, the brighter the image becomes.
-This can be used to reverse a vignette effect, though there is no automatic
-detection to extract the lens angle and other settings (yet). It can
-also be used to create a burning effect.
-
-
-
-Default value is ‘forward ’.
-
-
-eval
-Set evaluation mode for the expressions (angle , x0 , y0 ).
-
-It accepts the following values:
-
-‘init ’
-Evaluate expressions only once during the filter initialization.
-
-
-‘frame ’
-Evaluate expressions for each incoming frame. This is way slower than the
-‘init ’ mode since it requires all the scalers to be re-computed, but it
-allows advanced dynamic expressions.
-
-
-
-Default value is ‘init ’.
-
-
-dither
-Set dithering to reduce the circular banding effects. Default is 1
-(enabled).
-
-
-aspect
-Set vignette aspect. This setting allows one to adjust the shape of the vignette.
-Setting this value to the SAR of the input will make a rectangular vignetting
-following the dimensions of the video.
-
-Default is 1/1
.
-
-
-
-
-
30.103.1 Expressions# TOC
-
-
The alpha , x0 and y0 expressions can contain the
-following parameters.
-
-
-w
-h
-input width and height
-
-
-n
-the number of input frame, starting from 0
-
-
-pts
-the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
-TB units, NAN if undefined
-
-
-r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-
-t
-the PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in seconds, NAN if undefined
-
-
-tb
-time base of the input video
-
-
-
-
-
-
30.103.2 Examples# TOC
-
-
- Apply a simple strong vignetting effect:
-
vignette=PI/4
-
- Make a flickering vignetting:
-
-
vignette='PI/4+random(1)*PI/50':eval=frame
-
-
-
-
-
-
30.104 w3fdif# TOC
-
-
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
-Deinterlacing Filter").
-
-
Based on the process described by Martin Weston for BBC R&D, and
-implemented based on the de-interlace algorithm written by Jim
-Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
-uses filter coefficients calculated by BBC R&D.
-
-
There are two sets of filter coefficients, so-called "simple"
-and "complex". Which set of filter coefficients is used can
-be set by passing an optional parameter:
-
-
-filter
-Set the interlacing filter coefficients. Accepts one of the following values:
-
-
-‘simple ’
-Simple filter coefficient set.
-
-‘complex ’
-More-complex filter coefficient set.
-
-
-Default value is ‘complex ’.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following values:
-
-
-‘all ’
-Deinterlace all frames,
-
-‘interlaced ’
-Only deinterlace frames marked as interlaced.
-
-
-
-Default value is ‘all ’.
-
-
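-
-As an illustrative sketch (file names are placeholders), deinterlace only the
-frames marked as interlaced, using the complex coefficient set:
-
ffmpeg -i interlaced.ts -vf w3fdif=filter=complex:deint=interlaced progressive.mkv
-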
-
-
-
30.105 xbr# TOC
-
Apply the xBR high-quality magnification filter which is designed for pixel
-art. It follows a set of edge-detection rules, see
-http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
-
-
It accepts the following option:
-
-
-n
-Set the scaling dimension: 2
for 2xBR
, 3
for
-3xBR
and 4
for 4xBR
.
-Default is 3
.
-
-
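-
-As an illustrative sketch (file names are placeholders), magnify a pixel-art
-image by a factor of 4:
-
ffmpeg -i sprite.png -vf xbr=n=4 sprite_4x.png
-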
-
-
-
30.106 yadif# TOC
-
-
Deinterlace the input video ("yadif" means "yet another deinterlacing
-filter").
-
-
It accepts the following parameters:
-
-
-
-mode
-The interlacing mode to adopt. It accepts one of the following values:
-
-
-0, send_frame
-Output one frame for each frame.
-
-1, send_field
-Output one frame for each field.
-
-2, send_frame_nospatial
-Like send_frame
, but it skips the spatial interlacing check.
-
-3, send_field_nospatial
-Like send_field
, but it skips the spatial interlacing check.
-
-
-
-The default value is send_frame
.
-
-
-parity
-The picture field parity assumed for the input interlaced video. It accepts one
-of the following values:
-
-
-0, tff
-Assume the top field is first.
-
-1, bff
-Assume the bottom field is first.
-
--1, auto
-Enable automatic detection of field parity.
-
-
-
-The default value is auto
.
-If the interlacing is unknown or the decoder does not export this information,
-top field first will be assumed.
-
-
-deint
-Specify which frames to deinterlace. Accept one of the following
-values:
-
-
-0, all
-Deinterlace all frames.
-
-1, interlaced
-Only deinterlace frames marked as interlaced.
-
-
-
-The default value is all
.
-
-
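-
-As an illustrative sketch (file names are placeholders), double the frame rate
-by outputting one frame per field, with automatic parity detection:
-
ffmpeg -i interlaced.ts -vf yadif=mode=send_field:parity=auto output.mkv
-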
-
-
-
30.107 zoompan# TOC
-
-
Apply Zoom & Pan effect.
-
-
This filter accepts the following options:
-
-
-zoom, z
-Set the zoom expression. Default is 1.
-
-
-x
-y
-Set the x and y expression. Default is 0.
-
-
-d
-Set the duration expression in number of frames.
-This sets for how many frames the effect will last for a
-single input image.
-
-
-s
-Set the output image size, default is ’hd720’.
-
-
-
-
Each expression can contain the following constants:
-
-
-in_w, iw
-Input width.
-
-
-in_h, ih
-Input height.
-
-
-out_w, ow
-Output width.
-
-
-out_h, oh
-Output height.
-
-
-in
-Input frame count.
-
-
-on
-Output frame count.
-
-
-x
-y
-Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
-for current input frame.
-
-
-px
-py
-’x’ and ’y’ of the last output frame of the previous input frame, or 0 when
-there was no such frame yet (first input frame).
-
-
-zoom
-Last calculated zoom from ’z’ expression for current input frame.
-
-
-pzoom
-Last calculated zoom of last output frame of previous input frame.
-
-
-duration
-Number of output frames for current input frame. Calculated from ’d’ expression
-for each input frame.
-
-
-pduration
-Number of output frames created for the previous input frame.
-
-
-a
-Rational number: input width / input height
-
-
-sar
-sample aspect ratio
-
-
-dar
-display aspect ratio
-
-
-
-
-
-
30.107.1 Examples# TOC
-
-
- Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
-
-
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
-
-
-
-
-
-
31 Video Sources# TOC
-
-
Below is a description of the currently available video sources.
-
-
-
31.1 buffer# TOC
-
-
Buffer video frames, and make them available to the filter chain.
-
-
This source is mainly intended for a programmatic use, in particular
-through the interface defined in libavfilter/vsrc_buffer.h .
-
-
It accepts the following parameters:
-
-
-video_size
-Specify the size (width and height) of the buffered video frames. For the
-syntax of this option, check the "Video size" section in the ffmpeg-utils
-manual.
-
-
-width
-The input video width.
-
-
-height
-The input video height.
-
-
-pix_fmt
-A string representing the pixel format of the buffered video frames.
-It may be a number corresponding to a pixel format, or a pixel format
-name.
-
-
-time_base
-Specify the timebase assumed by the timestamps of the buffered frames.
-
-
-frame_rate
-Specify the frame rate expected for the video stream.
-
-
-pixel_aspect, sar
-The sample (pixel) aspect ratio of the input video.
-
-
-sws_param
-Specify the optional parameters to be used for the scale filter which
-is automatically inserted when an input change is detected in the
-input size or format.
-
-
-
-
For example:
-
-
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
-
-
-
will instruct the source to accept video frames with size 320x240 and
-with format "yuv410p", assuming 1/24 as the timestamps timebase and
-square pixels (1:1 sample aspect ratio).
-Since the pixel format with name "yuv410p" corresponds to the number 6
-(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
-this example corresponds to:
-
-
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
-
-
-
Alternatively, the options can be specified as a flat string, but this
-syntax is deprecated:
-
-
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
-
-
-
31.2 cellauto# TOC
-
-
Create a pattern generated by an elementary cellular automaton.
-
-
The initial state of the cellular automaton can be defined through the
-filename and pattern options. If such options are
-not specified an initial state is created randomly.
-
-
At each new frame a new row in the video is filled with the result of
-the cellular automaton next generation. The behavior when the whole
-frame is filled is defined by the scroll option.
-
-
This source accepts the following options:
-
-
-filename, f
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified file.
-In the file, each non-whitespace character is considered an alive
-cell, a newline will terminate the row, and further characters in the
-file will be ignored.
-
-
-pattern, p
-Read the initial cellular automaton state, i.e. the starting row, from
-the specified string.
-
-Each non-whitespace character in the string is considered an alive
-cell, a newline will terminate the row, and further characters in the
-string will be ignored.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial cellular automaton row. It
-is a floating point number value ranging from 0 to 1, defaults to
-1/PHI.
-
-This option is ignored when a file or a pattern is specified.
-
-
-random_seed, seed
-Set the seed for filling randomly the initial row, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the cellular automaton rule, it is a number ranging from 0 to 255.
-Default value is 110.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual.
-
-If filename or pattern is specified, the size is set
-by default to the width of the specified initial state row, and the
-height is set to width * PHI.
-
-If size is set, it must contain the width of the specified
-pattern string, and the specified pattern will be centered in the
-larger row.
-
-If a filename or a pattern string is not specified, the size value
-defaults to "320x518" (used for a randomly generated initial state).
-
-
-scroll
-If set to 1, scroll the output upward when all the rows in the output
-have been already filled. If set to 0, the new generated row will be
-written over the top row just after the bottom row is filled.
-Defaults to 1.
-
-
-start_full, full
-If set to 1, completely fill the output with generated rows before
-outputting the first frame.
-This is the default behavior, for disabling set the value to 0.
-
-
-stitch
-If set to 1, stitch the left and right row edges together.
-This is the default behavior, for disabling set the value to 0.
-
-
-
-
-
31.2.1 Examples# TOC
-
-
- Read the initial state from pattern , and specify an output of
-size 200x400.
-
-
cellauto=f=pattern:s=200x400
-
-
- Generate a random initial row with a width of 200 cells, with a fill
-ratio of 2/3:
-
-
cellauto=ratio=2/3:s=200x200
-
-
- Create a pattern generated by rule 18 starting by a single alive cell
-centered on an initial row with width 100:
-
-
cellauto=p=@:s=100x400:full=0:rule=18
-
-
- Specify a more elaborated initial pattern:
-
-
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
-
-
-
-
-
-
31.3 mandelbrot# TOC
-
-
Generate a Mandelbrot set fractal, and progressively zoom towards the
-point specified with start_x and start_y .
-
-
This source accepts the following options:
-
-
-end_pts
-Set the terminal pts value. Default value is 400.
-
-
-end_scale
-Set the terminal scale value.
-Must be a floating point value. Default value is 0.3.
-
-
-inner
-Set the inner coloring mode, that is the algorithm used to draw the
-Mandelbrot fractal internal region.
-
-It shall assume one of the following values:
-
-black
-Set black mode.
-
-convergence
-Show time until convergence.
-
-mincol
-Set color based on point closest to the origin of the iterations.
-
-period
-Set period mode.
-
-
-
-Default value is mincol .
-
-
-bailout
-Set the bailout value. Default value is 10.0.
-
-
-maxiter
-Set the maximum of iterations performed by the rendering
-algorithm. Default value is 7189.
-
-
-outer
-Set outer coloring mode.
-It shall assume one of following values:
-
-iteration_count
-Set iteration count mode.
-
-normalized_iteration_count
-Set normalized iteration count mode.
-
-
-Default value is normalized_iteration_count .
-
-
-rate, r
-Set frame rate, expressed as number of frames per second. Default
-value is "25".
-
-
-size, s
-Set frame size. For the syntax of this option, check the "Video
-size" section in the ffmpeg-utils manual. Default value is "640x480".
-
-
-start_scale
-Set the initial scale value. Default value is 3.0.
-
-
-start_x
-Set the initial x position. Must be a floating point value between
--100 and 100. Default value is -0.743643887037158704752191506114774.
-
-
-start_y
-Set the initial y position. Must be a floating point value between
--100 and 100. Default value is -0.131825904205311970493132056385139.
-
-
-
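-
-As an illustrative sketch (the values are arbitrary), the source can be
-previewed with ffplay:
-
ffplay -f lavfi mandelbrot=s=640x480:r=25
-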
-
-
31.4 mptestsrc# TOC
-
-
Generate various test patterns, as generated by the MPlayer test filter.
-
-
The size of the generated video is fixed, and is 256x256.
-This source is useful in particular for testing encoding features.
-
-
This source accepts the following options:
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-test, t
-
-Set the number or the name of the test to perform. Supported tests are:
-
-dc_luma
-dc_chroma
-freq_luma
-freq_chroma
-amp_luma
-amp_chroma
-cbp
-mv
-ring1
-ring2
-all
-
-
-Default value is "all", which will cycle through the list of all tests.
-
-
-
-
Some examples:
-
mptestsrc=t=dc_luma
-
will generate a "dc_luma" test pattern.
-
-
-
31.5 frei0r_src# TOC
-
-
Provide a frei0r source.
-
-
To enable compilation of this filter you need to install the frei0r
-header and configure FFmpeg with --enable-frei0r
.
-
-
This source accepts the following parameters:
-
-
-size
-The size of the video to generate. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-
-framerate
-The framerate of the generated video. It may be a string of the form
-num /den or a frame rate abbreviation.
-
-
-filter_name
-The name to the frei0r source to load. For more information regarding frei0r and
-how to set the parameters, read the frei0r section in the video filters
-documentation.
-
-
-filter_params
-A ’|’-separated list of parameters to pass to the frei0r source.
-
-
-
-
-
For example, to generate a frei0r partik0l source with size 200x200
-and frame rate 10 which is overlaid on the overlay filter main input:
-
-
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
-
-
-
-
31.6 life# TOC
-
-
Generate a life pattern.
-
-
This source is based on a generalization of John Conway’s life game.
-
-
The sourced input represents a life grid, each pixel represents a cell
-which can be in one of two possible states, alive or dead. Every cell
-interacts with its eight neighbours, which are the cells that are
-horizontally, vertically, or diagonally adjacent.
-
-
At each interaction the grid evolves according to the adopted rule,
-which specifies the number of neighbor alive cells which will make a
-cell stay alive or be born. The rule option allows one to specify
-the rule to adopt.
-
-
This source accepts the following options:
-
-
-filename, f
-Set the file from which to read the initial grid state. In the file,
-each non-whitespace character is considered an alive cell, and newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-
-rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-
-random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number value ranging from 0 to 1, defaults to 1/PHI.
-It is ignored when a file is specified.
-
-
-random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-included between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-
-rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "SNS /BNB ",
-where NS and NB are sequences of numbers in the range 0-8,
-NS specifies the number of alive neighbor cells which make a
-live cell stay alive, and NB the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively a rule can be specified by an 18-bit integer. The 9
-high-order bits encode the next cell state for a live cell, one bit
-for each possible number of alive neighbor cells; the low-order bits
-specify the rule for making new cells be born. Higher-order bits
-correspond to a higher number of neighbor cells.
-For example the number 6153 = (12<<9)+9 specifies a stay-alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway’s game of life
-rule, and will keep a cell alive if it has 2 or 3 neighbor alive
-cells, and will born a new cell if there are three alive cells around
-a dead cell.
-
-
-size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If filename is specified, the size is set by default to the
-same size of the input file. If size is set, it must contain
-the size specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-
-stitch
-If set to 1, stitch the left and right grid edges together, and the
-top and bottom edges also. Defaults to 1.
-
-
-mold
-Set cell mold speed. If set, a dead cell will go from death_color to
-mold_color with a step of mold . mold can have a
-value from 0 to 255.
-
-
-life_color
-Set the color of living (or new born) cells.
-
-
-death_color
-Set the color of dead cells. If mold is set, this is the first color
-used to represent a dead cell.
-
-
-mold_color
-Set mold color, for definitely dead and moldy cells.
-
-For the syntax of these 3 color options, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-
-
-
31.6.1 Examples# TOC
-
-
- Read a grid from pattern , and center it on a grid of size
-300x300 pixels:
-
-
life=f=pattern:s=300x300
-
-
- Generate a random grid of size 200x200, with a fill ratio of 2/3:
-
-
life=ratio=2/3:s=200x200
-
-
- Specify a custom rule for evolving a randomly generated grid:
-
-
- Full example with slow death effect (mold) using ffplay
:
-
-
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
-
-
-
-
-
31.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
-
-
-The color source provides a uniformly colored input.
-
-
-The haldclutsrc source provides an identity Hald CLUT. See also the
-haldclut filter.
-
-
-The nullsrc source returns unprocessed video frames. It is mainly
-useful for analysis / debugging tools, or as a source for filters
-which ignore the input data.
-
-
-The rgbtestsrc source generates an RGB test pattern useful for
-detecting RGB vs BGR issues. You should see a red, green and blue
-stripe from top to bottom.
-
-
-The smptebars source generates a color bars pattern, based on
-the SMPTE Engineering Guideline EG 1-1990.
-
-
-The smptehdbars source generates a color bars pattern, based on
-the SMPTE RP 219-2002.
-
-
-The testsrc source generates a test video pattern, showing a
-color pattern, a scrolling gradient and a timestamp. This is mainly
-intended for testing purposes.
-
-
The sources accept the following parameters:
-
-
-color, c
-Specify the color of the source, only available in the color
-source. For the syntax of this option, check the "Color" section in the
-ffmpeg-utils manual.
-
-
-level
-Specify the level of the Hald CLUT, only available in the haldclutsrc
-source. A level of N generates a picture of N*N*N by N*N*N
-pixels to be used as an identity matrix for 3D lookup tables. Each
-component is coded on a 1/(N*N) scale.
-
-
-size, s
-Specify the size of the sourced video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual. The default value is
-"320x240".
-
-This option is not available with the haldclutsrc
filter.
-
-
-rate, r
-Specify the frame rate of the sourced video, as the number of frames
-generated per second. It has to be a string in the format
-frame_rate_num /frame_rate_den , an integer number, a floating point
-number or a valid video frame rate abbreviation. The default value is
-"25".
-
-
-sar
-Set the sample aspect ratio of the sourced video.
-
-
-duration, d
-Set the duration of the sourced video. See
-(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
-for the accepted syntax.
-
-If not specified, or the expressed duration is negative, the video is
-supposed to be generated forever.
-
-
-decimals, n
-Set the number of decimals to show in the timestamp, only available in the
-testsrc
source.
-
-The displayed timestamp value will correspond to the original
-timestamp value multiplied by the power of 10 of the specified
-value. Default value is 0.
-
-
-
-
For example the following:
-
-
testsrc=duration=5.3:size=qcif:rate=10
-
-
-
will generate a video with a duration of 5.3 seconds, with size
-176x144 and a frame rate of 10 frames per second.
-
-
The following graph description will generate a red source
-with an opacity of 0.2, with size "qcif" and a frame rate of 10
-frames per second.
-
-
color=c=red@0.2:s=qcif:r=10
-
-
-
If the input content is to be ignored, nullsrc
can be used. The
-following command generates noise in the luminance plane by employing
-the geq
filter:
-
-
nullsrc=s=256x256, geq=random(1)*255:128:128
-
-
-
-
31.7.1 Commands# TOC
-
-
The color
source supports the following commands:
-
-
-c, color
-Set the color of the created image. Accepts the same syntax of the
-corresponding color option.
-
-
-
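-
-As an illustrative sketch beyond the examples above (the file names
-clut.png, input.mkv and output.mkv are placeholders), an identity Hald
-CLUT can be written to an image with haldclutsrc and later applied back
-with the haldclut filter:
-
-ffmpeg -f lavfi -i haldclutsrc=8 -frames:v 1 clut.png
-ffmpeg -i input.mkv -i clut.png -filter_complex '[0][1] haldclut' output.mkv
-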
-
-
-
32 Video Sinks# TOC
-
-
Below is a description of the currently available video sinks.
-
-
-
32.1 buffersink# TOC
-
-
Buffer video frames, and make them available to the end of the filter
-graph.
-
-
This sink is mainly intended for programmatic use, in particular
-through the interface defined in libavfilter/buffersink.h
-or the options system.
-
-
It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers’ formats, to be passed as the opaque
-parameter to avfilter_init_filter
for initialization.
-
-
-
32.2 nullsink# TOC
-
-
Null video sink: do absolutely nothing with the input video. It is
-mainly useful as a template and for use in analysis / debugging
-tools.
-
-
-
-
33 Multimedia Filters# TOC
-
-
Below is a description of the currently available multimedia filters.
-
-
-
33.1 avectorscope# TOC
-
-
Convert input audio to a video output, representing the audio vector
-scope.
-
-
-The filter is used to measure the difference between channels of a stereo
-audio stream. A mono signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is
-visible as a deviation from this line, creating a Lissajous figure.
-If the line (straight or deviating) appears horizontal, this indicates
-that the left and right channels are out of phase.
-
-
The filter accepts the following options:
-
-
-mode, m
-Set the vectorscope mode.
-
-Available values are:
-
-‘lissajous ’
-Lissajous rotated by 45 degrees.
-
-
-‘lissajous_xy ’
-Same as above but not rotated.
-
-
-
-Default value is ‘lissajous ’.
-
-
-size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is 400x400
.
-
-
-rate, r
-Set the output frame rate. Default value is 25
.
-
-
-rc
-gc
-bc
-Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
-Allowed range is [0, 255]
.
-
-
-rf
-gf
-bf
-Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
-Allowed range is [0, 255]
.
-
-
-zoom
-Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
-
-
-
-
-
33.1.1 Examples# TOC
-
-
- Complete example using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-
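-
- Another sketch (the input name and size are arbitrary): show the
-unrotated Lissajous figure at a larger output size:
-
-ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
-             [a] avectorscope=m=lissajous_xy:s=512x512 [out0]'
-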
-
-
-
-
33.2 concat# TOC
-
-
Concatenate audio and video streams, joining them together one after the
-other.
-
-
The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-
The filter accepts the following options:
-
-
-n
-Set the number of segments. Default is 2.
-
-
-v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-
-a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-
-unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-
-
-
-
The filter has v +a outputs: first v video outputs, then
-a audio outputs.
-
-
There are n x(v +a ) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-
Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-
For this filter to work correctly, all segments must start at timestamp 0.
-
-
All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-
Different frame rates are acceptable but will result in variable frame rate
-at output; be sure to configure the output file to handle it.
-
-
-
33.2.1 Examples# TOC
-
-
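-
- Concatenate three segments (a sketch; the file names are placeholders,
-and each input is assumed to contain exactly one video and two audio
-streams, in that order):
-
-ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
-  '[0:0] [0:1] [0:2] [1:0] [1:1] [1:2] [2:0] [2:1] [2:2]
-   concat=n=3:v=1:a=2 [v] [a1] [a2]' \
-  -map '[v]' -map '[a1]' -map '[a2]' output.mkv
-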
-
-
-
33.3 ebur128# TOC
-
-
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by M
), Short-term loudness (S
),
-Integrated loudness (I
) and Loudness Range (LRA
).
-
-
The filter also has a video output (see the video option) with a real
-time graph to observe the loudness evolution. The graphic contains the logged
-message mentioned above, so it is not printed anymore when this option is set,
-unless the verbose logging is set. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-
More information about the Loudness Recommendation EBU R128 on
-http://tech.ebu.ch/loudness .
-
-
The filter accepts the following options:
-
-
-video
-Activate the video output. The audio stream is passed through unchanged
-whether or not this option is set. The video stream will be the first output stream if
-activated. Default is 0
.
-
-
-size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is 640x480
.
-
-
-meter
-Set the EBU scale meter. Default is 9. Common values are 9 and
-18, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value in this range is allowed.
-
-
-metadata
-Set metadata injection. If set to 1
, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with lavfi.r128.
.
-
-Default is 0
.
-
-
-framelog
-Force the frame logging level.
-
-Available values are:
-
-‘info ’
-information logging level
-
-‘verbose ’
-verbose logging level
-
-
-
-By default, the logging level is set to info . If the video or
-the metadata options are set, it switches to verbose .
-
-
-peak
-Set peak mode(s).
-
-Available modes can be cumulated (the option is a flag
type). Possible
-values are:
-
-‘none ’
-Disable any peak mode (default).
-
-‘sample ’
-Enable sample-peak mode.
-
-Simple peak mode looking for the higher sample value. It logs a message
-for sample-peak (identified by SPK
).
-
-‘true ’
-Enable true-peak mode.
-
-If enabled, the peak lookup is done on an over-sampled version of the input
-stream for better peak accuracy. It logs a message for true-peak.
-(identified by TPK
) and true-peak per frame (identified by FTPK
).
-This mode requires a build with libswresample
.
-
-
-
-
-
-
-
-
33.3.1 Examples# TOC
-
-
- Real-time graph using ffplay
, with a EBU scale meter +18:
-
-
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-
-
- Run an analysis with ffmpeg
:
-
-
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-
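-
- A variant of the previous command (not in the original list) that also
-logs true-peak values; this assumes FFmpeg was built with libswresample:
-
-ffmpeg -nostats -i input.mp3 -filter_complex ebur128=peak=true -f null -
-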
-
-
-
-
33.4 interleave, ainterleave# TOC
-
-
Temporally interleave frames from several inputs.
-
-
interleave
works with video inputs, ainterleave
with audio.
-
-
These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-
-Input streams must have well defined, monotonically increasing frame
-timestamps.
-
-
-In order to submit one frame to the output, these filters need to queue
-at least one frame for each input, so they cannot work if one input
-has not yet terminated and will not receive incoming frames.
-
-
-For example, consider the case when one input is a select filter
-which always drops input frames. The interleave filter will keep
-reading from that input, but it will never be able to send new frames
-to the output until that input sends an end-of-stream signal.
-
-
Also, depending on inputs synchronization, the filters will drop
-frames in case one input receives more frames than the other ones, and
-the queue is already filled.
-
-
These filters accept the following options:
-
-
-nb_inputs, n
-Set the number of different inputs, it is 2 by default.
-
-
-
-
-
33.4.1 Examples# TOC
-
-
- Interleave frames belonging to different streams using ffmpeg
:
-
-
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-
-
- Add flickering blur effect:
-
-
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-
-
-
-
-
33.5 perms, aperms# TOC
-
-
Set read/write permissions for the output frames.
-
-
These filters are mainly aimed at developers to test direct path in the
-following filter in the filtergraph.
-
-
The filters accept the following options:
-
-
-mode
-Select the permissions mode.
-
-It accepts the following values:
-
-‘none ’
-Do nothing. This is the default.
-
-‘ro ’
-Set all the output frames read-only.
-
-‘rw ’
-Set all the output frames directly writable.
-
-‘toggle ’
-Make the frame read-only if writable, and writable if read-only.
-
-‘random ’
-Set each output frame read-only or writable randomly.
-
-
-
-
-seed
-Set the seed for the random mode, must be an integer included between
-0
and UINT32_MAX
. If not specified, or if explicitly set to
--1
, the filter will try to use a good random seed on a best effort
-basis.
-
-
-
-
Note: in case of auto-inserted filter between the permission filter and the
-following one, the permission might not be received as expected in that
-following filter. Inserting a format or aformat filter before the
-perms/aperms filter can avoid this problem.
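-
-As a minimal sketch of the note above (INPUT and OUTPUT are placeholders,
-and boxblur is just an arbitrary downstream filter under test), a format
-filter is inserted right before perms:
-
-ffmpeg -i INPUT -vf format=yuv420p,perms=mode=ro,boxblur=2 OUTPUT
-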
-
-
-
33.6 select, aselect# TOC
-
-
Select frames to pass in output.
-
-
This filter accepts the following options:
-
-
-expr, e
-Set expression, which is evaluated for each input frame.
-
-If the expression is evaluated to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-ceil(val)-1
, assuming that the input index starts from 0.
-
-For example a value of 1.2
corresponds to the output with index
-ceil(1.2)-1 = 2-1 = 1
, that is the second output.
-
-
-outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-
-
-
-
The expression can contain the following constants:
-
-
-n
-The (sequential) number of the filtered frame, starting from 0.
-
-
-selected_n
-The (sequential) number of the selected frame, starting from 0.
-
-
-prev_selected_n
-The sequential number of the last selected frame. It’s NAN if undefined.
-
-
-TB
-The timebase of the input timestamps.
-
-
-pts
-The PTS (Presentation TimeStamp) of the filtered video frame,
-expressed in TB units. It’s NAN if undefined.
-
-
-t
-The PTS of the filtered video frame,
-expressed in seconds. It’s NAN if undefined.
-
-
-prev_pts
-The PTS of the previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_pts
-The PTS of the last previously filtered video frame. It’s NAN if undefined.
-
-
-prev_selected_t
-The PTS of the last previously selected video frame. It’s NAN if undefined.
-
-
-start_pts
-The PTS of the first video frame in the video. It’s NAN if undefined.
-
-
-start_t
-The time of the first video frame in the video. It’s NAN if undefined.
-
-
-pict_type (video only)
-The type of the filtered frame. It can assume one of the following
-values:
-
-I
-P
-B
-S
-SI
-SP
-BI
-
-
-
-interlace_type (video only)
-The frame interlace type. It can assume one of the following values:
-
-PROGRESSIVE
-The frame is progressive (not interlaced).
-
-TOPFIRST
-The frame is top-field-first.
-
-BOTTOMFIRST
-The frame is bottom-field-first.
-
-
-
-
-consumed_sample_n (audio only)
-the number of selected samples before the current frame
-
-
-samples_n (audio only)
-the number of samples in the current frame
-
-
-sample_rate (audio only)
-the input sample rate
-
-
-key
-This is 1 if the filtered frame is a key-frame, 0 otherwise.
-
-
-pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
-
-
-scene (video only)
-value between 0 and 1 to indicate a new scene; a low value reflects a low
-probability for the current frame to introduce a new scene, while a higher
-value means the current frame is more likely to be one (see the example below)
-
-
-
-
-
The default value of the select expression is "1".
-
-
-
33.6.1 Examples# TOC
-
-
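-
- A few illustrative expressions (a sketch, not the original example list):
-
-Select only I-frames:
-
-select='eq(pict_type\,I)'
-
-Select only frames contained in the 10-20 time interval:
-
-select='between(t\,10\,20)'
-
-Select frames with a scene-change score greater than 0.4:
-
-select='gt(scene\,0.4)'
-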
-
-
-
33.7 sendcmd, asendcmd# TOC
-
-
Send commands to filters in the filtergraph.
-
-
These filters read commands to be sent to other filters in the
-filtergraph.
-
-
sendcmd
must be inserted between two video filters,
-asendcmd
must be inserted between two audio filters, but apart
-from that they act the same way.
-
-
The specification of commands can be provided in the filter arguments
-with the commands option, or in a file specified by the
-filename option.
-
-
These filters accept the following options:
-
-commands, c
-Set the commands to be read and sent to the other filters.
-
-filename, f
-Set the filename of the commands to be read and sent to the other
-filters.
-
-
-
-
-
33.7.1 Commands syntax# TOC
-
-
A commands description consists of a sequence of interval
-specifications, comprising a list of commands to be executed when a
-particular event related to that interval occurs. The occurring event
-is typically the current frame time entering or leaving a given time
-interval.
-
-
-An interval is specified by the following syntax:
-
-START[-END] COMMANDS;
-
-The time interval is specified by the START and END times.
-END is optional and defaults to the maximum time.
-
-
The current frame time is considered within the specified interval if
-it is included in the interval [START , END ), that is when
-the time is greater or equal to START and is lesser than
-END .
-
-
COMMANDS consists of a sequence of one or more command
-specifications, separated by ",", relating to that interval. The
-syntax of a command specification is given by:
-
-
[FLAGS ] TARGET COMMAND ARG
-
-
-
FLAGS is optional and specifies the type of events relating to
-the time interval which enable sending the specified command, and must
-be a non-null sequence of identifier flags separated by "+" or "|" and
-enclosed between "[" and "]".
-
-
The following flags are recognized:
-
-enter
-The command is sent when the current frame timestamp enters the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was not in the given interval, and the
-current is.
-
-
-leave
-The command is sent when the current frame timestamp leaves the
-specified interval. In other words, the command is sent when the
-previous frame timestamp was in the given interval, and the
-current is not.
-
-
-
-
If FLAGS is not specified, a default value of [enter]
is
-assumed.
-
-
TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional list of argument for
-the given COMMAND .
-
-
Between one interval specification and another, whitespaces, or
-sequences of characters starting with #
until the end of line,
-are ignored and can be used to annotate comments.
-
-
A simplified BNF description of the commands specification syntax
-follows:
-
-
COMMAND_FLAG ::= "enter" | "leave"
-COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
-COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
-COMMANDS ::= COMMAND [,COMMANDS ]
-INTERVAL ::= START [-END ] COMMANDS
-INTERVALS ::= INTERVAL [;INTERVALS ]
-
-
-
-
33.7.2 Examples# TOC
-
-
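-
- A minimal sketch (the interval and the downstream hue filter are
-arbitrary choices): desaturate the video between seconds 5 and 10 by
-sending commands to a following hue filter:
-
-sendcmd=c='5.0-10.0 [enter] hue s 0, [leave] hue s 1',hue
-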
-
-
-
33.8 setpts, asetpts# TOC
-
-
Change the PTS (presentation timestamp) of the input frames.
-
-
setpts
works on video frames, asetpts
on audio frames.
-
-
This filter accepts the following options:
-
-
-expr
-The expression which is evaluated for each frame to construct its timestamp.
-
-
-
-
-
The expression is evaluated through the eval API and can contain the following
-constants:
-
-
-FRAME_RATE
-frame rate, only defined for constant frame-rate video
-
-
-PTS
-The presentation timestamp in input
-
-
-N
-The count of the input frame for video or the number of consumed samples,
-not including the current frame for audio, starting from 0.
-
-
-NB_CONSUMED_SAMPLES
-The number of consumed samples, not including the current frame (only
-audio)
-
-
-NB_SAMPLES, S
-The number of samples in the current frame (only audio)
-
-
-SAMPLE_RATE, SR
-The audio sample rate.
-
-
-STARTPTS
-The PTS of the first frame.
-
-
-STARTT
-the time in seconds of the first frame
-
-
-INTERLACED
-State whether the current frame is interlaced.
-
-
-T
-the time in seconds of the current frame
-
-
-POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
-
-PREV_INPTS
-The previous input PTS.
-
-
-PREV_INT
-previous input time in seconds
-
-
-PREV_OUTPTS
-The previous output PTS.
-
-
-PREV_OUTT
-previous output time in seconds
-
-
-RTCTIME
-The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
-instead.
-
-
-RTCSTART
-The wallclock (RTC) time at the start of the movie in microseconds.
-
-
-TB
-The timebase of the input timestamps.
-
-
-
-
-
-
33.8.1 Examples# TOC
-
-
- Start counting PTS from zero:
-
-setpts=PTS-STARTPTS
-
- Apply fast motion effect:
-
-setpts=0.5*PTS
-
- Apply slow motion effect:
-
-setpts=2.0*PTS
-
- Set fixed rate of 25 frames per second:
-
-setpts=N/(25*TB)
-
- Set fixed rate 25 fps with some jitter:
-
-setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
-
- Apply an offset of 10 seconds to the input PTS:
-
-setpts=PTS+10/TB
-
- Generate timestamps from a "live source" and rebase onto the current timebase:
-
-setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
-
- Generate timestamps by counting samples:
-
-asetpts=N/SR/TB
-
-
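-
-Wrapped into a complete command line (a sketch; the file names are
-placeholders and audio is dropped, since it would need asetpts as well),
-the slow motion expression becomes:
-
-ffmpeg -i input.mkv -an -vf "setpts=2.0*PTS" output.mkv
-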
-
-
-
33.9 settb, asettb# TOC
-
-
Set the timebase to use for the output frames timestamps.
-It is mainly useful for testing timebase configuration.
-
-
It accepts the following parameters:
-
-
-expr, tb
-The expression which is evaluated into the output timebase.
-
-
-
-
-
The value for tb is an arithmetic expression representing a
-rational. The expression can contain the constants "AVTB" (the default
-timebase), "intb" (the input timebase) and "sr" (the sample rate,
-audio only). Default value is "intb".
-
-
-
33.9.1 Examples# TOC
-
-
- Set the timebase to 1/25:
-
-settb=expr=1/25
-
- Set the timebase to 1/10:
-
-settb=expr=0.1
-
- Set the timebase to 1001/1000:
-
-settb=1+0.001
-
- Set the timebase to 2*intb:
-
-settb=2*intb
-
- Set the default timebase value:
-
-settb=AVTB
-
-
-
-
33.10 showcqt# TOC
-
Convert input audio to a video output representing
-frequency spectrum logarithmically (using constant Q transform with
-Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
-
-
The filter accepts the following options:
-
-
-volume
-Specify transform volume (multiplier) expression. The expression can contain
-variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-a_weighting(f)
-A-weighting of equal loudness
-
-b_weighting(f)
-B-weighting of equal loudness
-
-c_weighting(f)
-C-weighting of equal loudness
-
-
-Default value is 16
.
-
-
-tlength
-Specify transform length expression. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-Default value is 384/f*tc/(384/f+tc)
.
-
-
-timeclamp
-Specify the transform timeclamp. At low frequencies, there is a trade-off
-between accuracy in the time domain and the frequency domain. A lower
-timeclamp represents events in the time domain (such as a fast bass drum)
-more accurately; a higher one represents events in the frequency domain
-(such as a bass guitar) more accurately. Acceptable range is [0.1, 1.0]. Default value is 0.17
.
-
-
-coeffclamp
-Specify the transform coeffclamp. If coeffclamp is lower, transform is
-more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
-Default value is 1.0
.
-
-
-gamma
-Specify gamma. Lower gamma makes the spectrum more contrasty, higher gamma
-gives the spectrum more range. Acceptable range is [1.0, 7.0].
-Default value is 3.0
.
-
-
-fontfile
-Specify font file for use with freetype. If not specified, use embedded font.
-
-
-fontcolor
-Specify font color expression. This is arithmetic expression that should return
-integer value 0xRRGGBB. The expression can contain variables:
-
-frequency, freq, f
-the frequency where transform is evaluated
-
-timeclamp, tc
-value of timeclamp option
-
-
-and functions:
-
-midi(f)
-midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
-
-r(x), g(x), b(x)
-red, green, and blue value of intensity x
-
-
-Default value is st(0, (midi(f)-59.5)/12);
-st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
-r(1-ld(1)) + b(ld(1))
-
-
-fullhd
-If set to 1 (the default), the video size is 1920x1080 (full HD),
-if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
-
-
-fps
-Specify video fps. Default value is 25
.
-
-
-count
-Specify number of transform per frame, so there are fps*count transforms
-per second. Note that audio data rate must be divisible by fps*count.
-Default value is 6
.
-
-
-
-
-
-
33.10.1 Examples# TOC
-
-
- Playing audio while showing the spectrum:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with frame rate 30 fps:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
-
-
- Playing at 960x540 and lower CPU usage:
-
-
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
-
-
- A1 and its harmonics: A1, A2, (near)E3, A3:
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt [out0]'
-
-
- Same as above, but with more accuracy in frequency domain (and slower):
-
-
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
- asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
-
-
- B-weighting of equal loudness
-
-
volume=16*b_weighting(f)
-
-
- Lower Q factor
-
-
tlength=100/f*tc/(100/f+tc)
-
-
- Custom fontcolor, C-note is colored green, others are colored blue
-
-
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
-
-
-
-
-
-
33.11 showspectrum# TOC
-
-
Convert input audio to a video output, representing the audio frequency
-spectrum.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value is
-640x512
.
-
-
-slide
-Specify how the spectrum should slide along the window.
-
-It accepts the following values:
-
-‘replace ’
-the samples start again on the left when they reach the right
-
-‘scroll ’
-the samples scroll from right to left
-
-‘fullframe ’
-frames are only produced when the samples reach the right
-
-
-
-Default value is replace
.
-
-
-mode
-Specify display mode.
-
-It accepts the following values:
-
-‘combined ’
-all channels are displayed in the same row
-
-‘separate ’
-all channels are displayed in separate rows
-
-
-
-Default value is ‘combined ’.
-
-
-color
-Specify display color mode.
-
-It accepts the following values:
-
-‘channel ’
-each channel is displayed in a separate color
-
-‘intensity ’
-each channel is displayed using the same color scheme
-
-
-
-Default value is ‘channel ’.
-
-
-scale
-Specify scale used for calculating intensity color values.
-
-It accepts the following values:
-
-‘lin ’
-linear
-
-‘sqrt ’
-square root, default
-
-‘cbrt ’
-cubic root
-
-‘log ’
-logarithmic
-
-
-
-Default value is ‘sqrt ’.
-
-
-saturation
-Set saturation modifier for displayed colors. Negative values provide
-alternative color scheme. 0
is no saturation at all.
-Saturation must be in [-10.0, 10.0] range.
-Default value is 1
.
-
-
-win_func
-Set window function.
-
-It accepts the following values:
-
-‘none ’
-No samples pre-processing (do not expect this to be faster)
-
-‘hann ’
-Hann window
-
-‘hamming ’
-Hamming window
-
-‘blackman ’
-Blackman window
-
-
-
-Default value is hann
.
-
-
-
-
The usage is very similar to the showwaves filter; see the examples in that
-section.
-
-
-
33.11.1 Examples# TOC
-
-
- Large window with logarithmic color scaling:
-
-
showspectrum=s=1280x480:scale=log
-
-
- Complete example for a colored and sliding spectrum per channel using ffplay
:
-
-
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
- [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
-
-
-
-
-
33.12 showwaves# TOC
-
-
Convert input audio to a video output, representing the samples waves.
-
-
The filter accepts the following options:
-
-
-size, s
-Specify the video size for the output. For the syntax of this option, check
-the "Video size" section in the ffmpeg-utils manual. Default value
-is "600x240".
-
-
-mode
-Set display mode.
-
-Available values are:
-
-‘point ’
-Draw a point for each sample.
-
-
-‘line ’
-Draw a vertical line for each sample.
-
-
-‘p2p ’
-Draw a point for each sample and a line between them.
-
-
-‘cline ’
-Draw a centered vertical line for each sample.
-
-
-
-Default value is point
.
-
-
-n
-Set the number of samples which are printed on the same column. A
-larger value will decrease the frame rate. Must be a positive
-integer. This option can be set only if the value for rate
-is not explicitly specified.
-
-
-rate, r
-Set the (approximate) output frame rate. This is done by setting the
-option n . Default value is "25".
-
-
-split_channels
-Set if channels should be drawn separately or overlap. Default value is 0.
-
-
-
-
-
-
33.12.1 Examples# TOC
-
-
- Output the input file audio and the corresponding video representation
-at the same time:
-
-
amovie=a.mp3,asplit[out0],showwaves[out1]
-
-
- Create a synthetic signal and show it with showwaves, forcing a
-frame rate of 30 frames per second:
-
-
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
-
-
-
-
-
33.13 split, asplit# TOC
-
-
Split input into several identical outputs.
-
-
asplit
works with audio input, split
with video.
-
-
The filter accepts a single parameter which specifies the number of outputs. If
-unspecified, it defaults to 2.
-
-
-
33.13.1 Examples# TOC
-
-
- Create two separate outputs from the same input:
-
-
[in] split [out0][out1]
-
-
- To create 3 or more outputs, you need to specify the number of
-outputs, like in:
-
-
[in] asplit=3 [out0][out1][out2]
-
-
- Create two separate outputs from the same input, one cropped and
-one padded:
-
-
[in] split [splitout1][splitout2];
-[splitout1] crop=100:100:0:0 [cropout];
-[splitout2] pad=200:200:100:100 [padout];
-
-
- Create 5 copies of the input audio with ffmpeg
:
-
-
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
-
-
-
-
-
33.14 zmq, azmq# TOC
-
-
Receive commands sent through a libzmq client, and forward them to
-filters in the filtergraph.
-
-
zmq
and azmq
work as a pass-through filters. zmq
-must be inserted between two video filters, azmq
between two
-audio filters.
-
-
To enable these filters you need to install the libzmq library and
-headers and configure FFmpeg with --enable-libzmq
.
-
-
For more information about libzmq see:
-http://www.zeromq.org/
-
-
The zmq
and azmq
filters work as a libzmq server, which
-receives messages sent through a network interface defined by the
-bind_address option.
-
-
-The received message must be in the form:
-
-TARGET COMMAND [ARG]
-
-TARGET specifies the target of the command, usually the name of
-the filter class or a specific filter instance name.
-
-
COMMAND specifies the name of the command for the target filter.
-
-
ARG is optional and specifies the optional argument list for the
-given COMMAND .
-
-
Upon reception, the message is processed and the corresponding command
-is injected into the filtergraph. Depending on the result, the filter
-will send a reply to the client, adopting the format:
-
-
ERROR_CODE ERROR_REASON
-MESSAGE
-
-
-
MESSAGE is optional.
-
-
-
33.14.1 Examples# TOC
-
-
Look at tools/zmqsend for an example of a zmq client which can
-be used to send commands processed by these filters.
-
-
Consider the following filtergraph generated by ffplay
-
-
ffplay -dumpgraph 1 -f lavfi "
-color=s=100x100:c=red [l];
-color=s=100x100:c=blue [r];
-nullsrc=s=200x100, zmq [bg];
-[bg][l] overlay [bg+l];
-[bg+l][r] overlay=x=100 "
-
-
-
To change the color of the left side of the video, the following
-command can be used:
-
-
echo Parsed_color_0 c yellow | tools/zmqsend
-
-
-
To change the right side:
-
-
echo Parsed_color_1 c pink | tools/zmqsend
-
-
-
-
-
34 Multimedia Sources# TOC
-
-
Below is a description of the currently available multimedia sources.
-
-
-
34.1 amovie# TOC
-
-
This is the same as movie source, except it selects an audio
-stream by default.
-
-
-
34.2 movie# TOC
-
-
Read audio and/or video stream(s) from a movie container.
-
-
It accepts the following parameters:
-
-
-filename
-The name of the resource to read (not necessarily a file; it can also be a
-device or a stream accessed through some protocol).
-
-
-format_name, f
-Specifies the format assumed for the movie to read, and can be either
-the name of a container or an input device. If not specified, the
-format is guessed from movie_name or by probing.
-
-
-seek_point, sp
-Specifies the seek point in seconds. The frames will be output
-starting from this seek point. The parameter is evaluated with
-av_strtod
, so the numerical value may be suffixed by an IS
-postfix. The default value is "0".
-
-
-streams, s
-Specifies the streams to read. Several streams can be specified,
-separated by "+". The source will then have as many outputs, in the
-same order. The syntax is explained in the “Stream specifiers”
-section in the ffmpeg manual. Two special names, "dv" and "da" specify
-respectively the default (best suited) video and audio stream. Default
-is "dv", or "da" if the filter is called as "amovie".
-
-
-stream_index, si
-Specifies the index of the video stream to read. If the value is -1,
-the most suitable video stream will be automatically selected. The default
-value is "-1". Deprecated. If the filter is called "amovie", it will select
-audio instead of video.
-
-
-loop
-Specifies how many times to read the stream in sequence.
-If the value is less than 1, the stream will be read again and again.
-Default value is "1".
-
-Note that when the movie is looped the source timestamps are not
-changed, so it will generate non monotonically increasing timestamps.
-
-
-
-
It allows overlaying a second video on top of the main input of
-a filtergraph, as shown in this graph:
-
-
input -----------> deltapts0 --> overlay --> output
- ^
- |
-movie --> scale--> deltapts1 -------+
-
-
-
34.2.1 Examples# TOC
-
-
- Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
-on top of the input labelled "in":
-
-
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read from a video4linux2 device, and overlay it on top of the input
-labelled "in":
-
-
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
-[in] setpts=PTS-STARTPTS [main];
-[main][over] overlay=16:16 [out]
-
-
- Read the first video stream and the audio stream with id 0x81 from
-dvd.vob; the video is connected to the pad named "video" and the audio is
-connected to the pad named "audio":
-
-
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
-
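-
- As an additional sketch (the file name and loop count are arbitrary, and
-a constant frame rate input is assumed so that FRAME_RATE is defined),
-loop a short clip as an overlay source while regenerating monotonic
-timestamps:
-
-movie=short.avi:loop=3,setpts=N/(FRAME_RATE*TB) [over]
-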
-
-
-
-
-
35 See Also# TOC
-
-
ffprobe ,
-ffmpeg , ffplay , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
36 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/ffprobe.html b/Externals/ffmpeg/shared/doc/ffprobe.html
deleted file mode 100644
index 1e10bf8cf5..0000000000
--- a/Externals/ffmpeg/shared/doc/ffprobe.html
+++ /dev/null
@@ -1,1113 +0,0 @@
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
- ffprobe Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Synopsis# TOC
-
-
ffprobe [options ] [input_file ]
-
-
-
2 Description# TOC
-
-
ffprobe gathers information from multimedia streams and prints it in
-human- and machine-readable fashion.
-
-
For example it can be used to check the format of the container used
-by a multimedia stream and the format and type of each media stream
-contained in it.
-
-
If a filename is specified in input, ffprobe will try to open and
-probe the file content. If the file cannot be opened or recognized as
-a multimedia file, a positive exit code is returned.
-
-
ffprobe may be employed both as a standalone application or in
-combination with a textual filter, which may perform more
-sophisticated processing, e.g. statistical processing or plotting.
-
-
Options are used to list some of the formats supported by ffprobe or
-for specifying which information to display, and for setting how
-ffprobe will show it.
-
-
ffprobe output is designed to be easily parsable by a textual filter,
-and consists of one or more sections of a form defined by the selected
-writer, which is specified by the print_format option.
-
-
-Sections may contain other nested sections, and are identified by a
-name (which may be shared by other sections), and a unique
-name. See the output of sections .
-
-
Metadata tags stored in the container or in the streams are recognized
-and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
-section.
-
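-
-As a minimal sketch (the file name is a placeholder), a typical invocation
-that prints both container-level and per-stream information is:
-
-ffprobe -hide_banner -show_format -show_streams input.mp4
-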
-
-
-
3 Options# TOC
-
-
All the numerical options, if not specified otherwise, accept a string
-representing a number as input, which may be followed by one of the SI
-unit prefixes, for example: ’K’, ’M’, or ’G’.
-
-
If ’i’ is appended to the SI unit prefix, the complete prefix will be
-interpreted as a unit prefix for binary multiples, which are based on
-powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
-prefix multiplies the value by 8. This allows using, for example:
-’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
-
-
Options which do not take arguments are boolean options, and set the
-corresponding value to true. They can be set to false by prefixing
-the option name with "no". For example using "-nofoo"
-will set the boolean option with name "foo" to false.
-
-
-
3.1 Stream specifiers# TOC
-
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
-are used to precisely specify which stream(s) a given option belongs to.
-
-
A stream specifier is a string generally appended to the option name and
-separated from it by a colon. E.g. -codec:a:1 ac3
contains the
-a:1
stream specifier, which matches the second audio stream. Therefore, it
-would select the ac3 codec for the second audio stream.
-
-
A stream specifier can match several streams, so that the option is applied to all
-of them. E.g. the stream specifier in -b:a 128k
matches all audio
-streams.
-
-
An empty stream specifier matches all streams. For example, -codec copy
-or -codec: copy
would copy all the streams without reencoding.
-
-
Possible forms of stream specifiers are:
-
-stream_index
-Matches the stream with this index. E.g. -threads:1 4
would set the
-thread count for the second stream to 4.
-
-stream_type [:stream_index ]
-stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
-’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
-stream number stream_index of this type. Otherwise, it matches all
-streams of this type.
-
-p:program_id [:stream_index ]
-If stream_index is given, then it matches the stream with number stream_index
-in the program with the id program_id . Otherwise, it matches all streams in the
-program.
-
-#stream_id or i:stream_id
-Match the stream by stream id (e.g. PID in MPEG-TS container).
-
-m:key [:value ]
-Matches streams with the metadata tag key having the specified value. If
-value is not given, matches streams that contain the given tag with any
-value.
-
-Note that in ffmpeg
, matching by metadata will only work properly for
-input files.
-
-
-
-
-
3.2 Generic options# TOC
-
-
These options are shared amongst the ff* tools.
-
-
--L
-Show license.
-
-
--h, -?, -help, --help [arg ]
-Show help. An optional parameter may be specified to print help about a specific
-item. If no argument is specified, only basic (non advanced) tool
-options are shown.
-
-Possible values of arg are:
-
-long
-Print advanced tool options in addition to the basic tool options.
-
-
-full
-Print complete list of options, including shared and private options
-for encoders, decoders, demuxers, muxers, filters, etc.
-
-
-decoder=decoder_name
-Print detailed information about the decoder named decoder_name . Use the
--decoders option to get a list of all decoders.
-
-
-encoder=encoder_name
-Print detailed information about the encoder named encoder_name . Use the
--encoders option to get a list of all encoders.
-
-
-demuxer=demuxer_name
-Print detailed information about the demuxer named demuxer_name . Use the
--formats option to get a list of all demuxers and muxers.
-
-
-muxer=muxer_name
-Print detailed information about the muxer named muxer_name . Use the
--formats option to get a list of all muxers and demuxers.
-
-
-filter=filter_name
-Print detailed information about the filter name filter_name . Use the
--filters option to get a list of all filters.
-
-
-
-
--version
-Show version.
-
-
--formats
-Show available formats (including devices).
-
-
--devices
-Show available devices.
-
-
--codecs
-Show all codecs known to libavcodec.
-
-Note that the term ’codec’ is used throughout this documentation as a shortcut
-for what is more correctly called a media bitstream format.
-
-
--decoders
-Show available decoders.
-
-
--encoders
-Show all available encoders.
-
-
--bsfs
-Show available bitstream filters.
-
-
--protocols
-Show available protocols.
-
-
--filters
-Show available libavfilter filters.
-
-
--pix_fmts
-Show available pixel formats.
-
-
--sample_fmts
-Show available sample formats.
-
-
--layouts
-Show channel names and standard channel layouts.
-
-
--colors
-Show recognized color names.
-
-
--sources device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sources of the input device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sources pulse,server=192.168.0.4
-
-
-
--sinks device [,opt1 =val1 [,opt2 =val2 ]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-
-
ffmpeg -sinks pulse,server=192.168.0.4
-
-
-
--loglevel [repeat+]loglevel | -v [repeat+]loglevel
-Set the logging level used by the library.
-Adding "repeat+" indicates that repeated log output should not be compressed
-to the first line and the "Last message repeated n times" line will be
-omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-’repeat’ will not change the loglevel.
-loglevel is a string or a number containing one of the following values:
-
-‘quiet, -8 ’
-Show nothing at all; be silent.
-
-‘panic, 0 ’
-Only show fatal errors which could lead the process to crash, such as
-an assert failure. This is not currently used for anything.
-
-‘fatal, 8 ’
-Only show fatal errors. These are errors after which the process absolutely
-cannot continue.
-
-‘error, 16 ’
-Show all errors, including ones which can be recovered from.
-
-‘warning, 24 ’
-Show all warnings and errors. Any message related to possibly
-incorrect or unexpected events will be shown.
-
-‘info, 32 ’
-Show informative messages during processing. This is in addition to
-warnings and errors. This is the default value.
-
-‘verbose, 40 ’
-Same as info
, except more verbose.
-
-‘debug, 48 ’
-Show everything, including debugging information.
-
-
-
-By default the program logs to stderr, if coloring is supported by the
-terminal, colors are used to mark errors and warnings. Log coloring
-can be disabled setting the environment variable
-AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
-the environment variable AV_LOG_FORCE_COLOR
.
-The use of the environment variable NO_COLOR
is deprecated and
-will be dropped in a following FFmpeg version.
-
-
--report
-Dump full command line and console output to a file named
-program -YYYYMMDD -HHMMSS .log
in the current
-directory.
-This file can be useful for bug reports.
-It also implies -loglevel verbose
.
-
-Setting the environment variable FFREPORT
to any value has the
-same effect. If the value is a ’:’-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
-contain special characters or the options delimiter ’:’ (see the
-“Quoting and escaping” section in the ffmpeg-utils manual).
-
-The following options are recognized:
-
-file
-set the file name to use for the report; %p
is expanded to the name
-of the program, %t
is expanded to a timestamp, %%
is expanded
-to a plain %
-
-level
-set the log verbosity level using a numerical value (see -loglevel
).
-
-
-
-For example, to output a report to a file named ffreport.log
-using a log level of 32
(alias for log level info
):
-
-
-
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-
-
-Errors in parsing the environment variable are not fatal, and will not
-appear in the report.
-
-
--hide_banner
-Suppress printing banner.
-
-All FFmpeg tools will normally show a copyright notice, build options
-and library versions. This option can be used to suppress printing
-this information.
-
-
--cpuflags flags (global )
-Allows setting and clearing cpu flags. This option is intended
-for testing. Do not use it unless you know what you’re doing.
-
-
ffmpeg -cpuflags -sse+mmx ...
-ffmpeg -cpuflags mmx ...
-ffmpeg -cpuflags 0 ...
-
-Possible flags for this option are:
-
-‘x86 ’
-
-‘mmx ’
-‘mmxext ’
-‘sse ’
-‘sse2 ’
-‘sse2slow ’
-‘sse3 ’
-‘sse3slow ’
-‘ssse3 ’
-‘atom ’
-‘sse4.1 ’
-‘sse4.2 ’
-‘avx ’
-‘xop ’
-‘fma4 ’
-‘3dnow ’
-‘3dnowext ’
-‘cmov ’
-
-
-‘ARM ’
-
-‘armv5te ’
-‘armv6 ’
-‘armv6t2 ’
-‘vfp ’
-‘vfpv3 ’
-‘neon ’
-
-
-‘PowerPC ’
-
-‘altivec ’
-
-
-‘Specific Processors ’
-
-‘pentium2 ’
-‘pentium3 ’
-‘pentium4 ’
-‘k6 ’
-‘k62 ’
-‘athlon ’
-‘athlonxp ’
-‘k8 ’
-
-
-
-
-
--opencl_bench
-Benchmark all available OpenCL devices and show the results. This option
-is only available when FFmpeg has been compiled with --enable-opencl
.
-
-
--opencl_options options (global )
-Set OpenCL environment options. This option is only available when
-FFmpeg has been compiled with --enable-opencl
.
-
-options must be a list of key =value option pairs
-separated by ’:’. See the “OpenCL Options” section in the
-ffmpeg-utils manual for the list of supported options.
-
-
-
-
-
3.3 AVOptions# TOC
-
-
These options are provided directly by the libavformat, libavdevice and
-libavcodec libraries. To see the list of available AVOptions, use the
--help option. They are separated into two categories:
-
-generic
-These options can be set for any container, codec or device. Generic options
-are listed under AVFormatContext options for containers/devices and under
-AVCodecContext options for codecs.
-
-private
-These options are specific to the given container, device or codec. Private
-options are listed under their corresponding containers/devices/codecs.
-
-
-
-
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
-an MP3 file, use the id3v2_version private option of the MP3
-muxer:
-
-
ffmpeg -i input.flac -id3v2_version 3 out.mp3
-
-
-
All codec AVOptions are per-stream, and thus a stream specifier
-should be attached to them.
-
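-For instance (a sketch; the file names are placeholders), to set the
-bitrate AVOption only on the second audio stream:
-
-ffmpeg -i input.mkv -b:a:1 96k output.mkv
-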
-
Note: the -nooption syntax cannot be used for boolean
-AVOptions, use -option 0 /-option 1 .
-
-
Note: the old undocumented way of specifying per-stream AVOptions by
-prepending v/a/s to the options name is now obsolete and will be
-removed soon.
-
-
-
3.4 Main options# TOC
-
-
--f format
-Force format to use.
-
-
--unit
-Show the unit of the displayed values.
-
-
--prefix
-Use SI prefixes for the displayed values.
-Unless the "-byte_binary_prefix" option is used all the prefixes
-are decimal.
-
-
--byte_binary_prefix
-Force the use of binary prefixes for byte values.
-
-
--sexagesimal
-Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
-
-
--pretty
-Prettify the format of the displayed values, it corresponds to the
-options "-unit -prefix -byte_binary_prefix -sexagesimal".
-
-
--of, -print_format writer_name [=writer_options ]
-Set the output printing format.
-
-writer_name specifies the name of the writer, and
-writer_options specifies the options to be passed to the writer.
-
-For example, for printing the output in JSON format, specify:
-
--of json
-
-For more details on the available output printing formats, see the
-Writers section below.
-
-
--sections
-Print sections structure and section information, and exit. The output
-is not meant to be parsed by a machine.
-
-
--select_streams stream_specifier
-Select only the streams specified by stream_specifier . This
-option affects only the options related to streams
-(e.g. show_streams
, show_packets
, etc.).
-
-For example to show only audio streams, you can use the command:
-
-
ffprobe -show_streams -select_streams a INPUT
-
-
-To show only video packets belonging to the video stream with index 1:
-
-
ffprobe -show_packets -select_streams v:1 INPUT
-
-
-
--show_data
-Show payload data, as a hexadecimal and ASCII dump. Coupled with
--show_packets , it will dump the packets’ data. Coupled with
--show_streams , it will dump the codec extradata.
-
-The dump is printed as the "data" field. It may contain newlines.
-
-
--show_data_hash algorithm
-Show a hash of payload data, for packets with -show_packets and for
-codec extradata with -show_streams .
-
-
--show_error
-Show information about the error found when trying to probe the input.
-
-The error information is printed within a section with name "ERROR".
-
-
--show_format
-Show information about the container format of the input multimedia
-stream.
-
-All the container format information is printed within a section with
-name "FORMAT".
-
-
--show_format_entry name
-Like -show_format , but only prints the specified entry of the
-container format information, rather than all. This option may be given more
-than once, then all specified entries will be shown.
-
-This option is deprecated, use show_entries
instead.
-
-
--show_entries section_entries
-Set list of entries to show.
-
-Entries are specified according to the following
-syntax. section_entries contains a list of section entries
-separated by :
. Each section entry is composed by a section
-name (or unique name), optionally followed by a list of entries local
-to that section, separated by ,
.
-
-If section name is specified but is followed by no =
, all
-entries are printed to output, together with all the contained
-sections. Otherwise only the entries specified in the local section
-entries list are printed. In particular, if =
is specified but
-the list of local entries is empty, then no entries will be shown for
-that section.
-
-Note that the order of specification of the local section entries is
-not honored in the output, and the usual display order will be
-retained.
-
-The formal syntax is given by:
-
-
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME [,LOCAL_SECTION_ENTRIES ]
-SECTION_ENTRY ::= SECTION_NAME [=[LOCAL_SECTION_ENTRIES ]]
-SECTION_ENTRIES ::= SECTION_ENTRY [:SECTION_ENTRIES ]
-
-
-For example, to show only the index and type of each stream, and the PTS
-time, duration time, and stream index of the packets, you can specify
-the argument:
-
-
packet=pts_time,duration_time,stream_index : stream=index,codec_type
-
-
-To show all the entries in the section "format", but only the codec
-type in the section "stream", specify the argument:
-
-
format : stream=codec_type
-
-
-To show all the tags in the stream and format sections:
-
-
stream_tags : format_tags
-
-
-To show only the title tag (if available) in the stream
-sections:
-
-stream_tags=title
-
-
--show_packets
-Show information about each packet contained in the input multimedia
-stream.
-
-The information for each single packet is printed within a dedicated
-section with name "PACKET".
-
-
--show_frames
-Show information about each frame and subtitle contained in the input
-multimedia stream.
-
-The information for each single frame is printed within a dedicated
-section with name "FRAME" or "SUBTITLE".
-
-
--show_streams
-Show information about each media stream contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "STREAM".
-
-
--show_programs
-Show information about programs and their streams contained in the input
-multimedia stream.
-
-Each media stream information is printed within a dedicated section
-with name "PROGRAM_STREAM".
-
-
--show_chapters
-Show information about chapters stored in the format.
-
-Each chapter is printed within a dedicated section with name "CHAPTER".
-
-
--count_frames
-Count the number of frames per stream and report it in the
-corresponding stream section.
-
-
--count_packets
-Count the number of packets per stream and report it in the
-corresponding stream section.
-
-
--read_intervals read_intervals
-
-Read only the specified intervals. read_intervals must be a
-sequence of interval specifications separated by ",".
-ffprobe
will seek to the interval starting point, and will
-continue reading from that.
-
-Each interval is specified by two optional parts, separated by "%".
-
-The first part specifies the interval start position. It is
-interpreted as an absolute position, or as a relative offset from the
-current position if it is preceded by the "+" character. If this first
-part is not specified, no seeking will be performed when reading this
-interval.
-
-The second part specifies the interval end position. It is interpreted
-as an absolute position, or as a relative offset from the current
-position if it is preceded by the "+" character. If the offset
-specification starts with "#", it is interpreted as the number of
-packets to read (not including the flushing packets) from the interval
-start. If no second part is specified, the program will read until the
-end of the input.
-
-Note that seeking is not accurate, thus the actual interval start
-point may be different from the specified position. Also, when an
-interval duration is specified, the absolute end time will be computed
-by adding the duration to the interval start point found by seeking
-the file, rather than to the specified start value.
-
-The formal syntax is given by:
-
-
INTERVAL ::= [START |+START_OFFSET ][%[END |+END_OFFSET ]]
-INTERVALS ::= INTERVAL [,INTERVALS ]
-
-
-A few examples follow.
-
- Seek to time 10, read packets until 20 seconds after the found seek
-point, then seek to position 01:30 (1 minute and thirty
-seconds) and read packets until position 01:45:
-
-10%+20,01:30%01:45
-
- Read only 42 packets after seeking to position 01:23:
-
-01:23%+#42
-
- Read only the first 20 seconds from the start:
-
-%+20
-
- Read from the start until position 02:30:
-
-%02:30
-
-
-
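-A hypothetical command line using this option (not from the original
-text; "input.mkv" is a placeholder) that reads only the first 20
-seconds while showing packets:
-
-ffprobe -read_intervals "%+20" -show_packets input.mkv
-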
-
--show_private_data, -private
-Show private data, that is data depending on the format of the
-particular shown element.
-This option is enabled by default, but you may need to disable it
-for specific uses, for example when creating XSD-compliant XML output.
-
-
--show_program_version
-Show information related to program version.
-
-Version information is printed within a section with name
-"PROGRAM_VERSION".
-
-
--show_library_versions
-Show information related to library versions.
-
-Version information for each library is printed within a section with
-name "LIBRARY_VERSION".
-
-
--show_versions
-Show information related to program and library versions. This is the
-equivalent of setting both -show_program_version and
--show_library_versions options.
-
-
--show_pixel_formats
-Show information about all pixel formats supported by FFmpeg.
-
-Pixel format information for each format is printed within a section
-with name "PIXEL_FORMAT".
-
-
--bitexact
-Force bitexact output, useful to produce output which is not dependent
-on the specific build.
-
-
--i input_file
-Read input_file .
-
-
-
-
-
-
4 Writers# TOC
-
-
A writer defines the output format adopted by ffprobe, and will be
-used for printing all the parts of the output.
-
-
A writer may accept one or more arguments, which specify the options
-to adopt. The options are specified as a list of key =value
-pairs, separated by ":".
-
-
All writers support the following options:
-
-
-string_validation, sv
-Set string validation mode.
-
-The following values are accepted.
-
-‘fail ’
-The writer will fail immediately in case an invalid string (UTF-8)
-sequence or code point is found in the input. This is especially
-useful to validate input metadata.
-
-
-‘ignore ’
-Any validation error will be ignored. This will result in possibly
-broken output, especially with the json or xml writer.
-
-
-‘replace ’
-The writer will substitute invalid UTF-8 sequences or code points with
-the string specified with the string_validation_replacement .
-
-
-
-Default value is ‘replace ’.
-
-
-string_validation_replacement, svr
-Set replacement string to use in case string_validation is
-set to ‘replace ’.
-
-In case the option is not specified, the writer will assume the empty
-string, that is it will remove the invalid sequences from the input
-strings.
-
-
-
-
A description of the currently available writers follows.
-
-
-
4.1 default# TOC
-
Default format.
-
-
Print each section in the form:
-
-
[SECTION]
-key1=val1
-...
-keyN=valN
-[/SECTION]
-
-
-
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
-PROGRAM_STREAM section, and are prefixed by the string "TAG:".
-
-
A description of the accepted options follows.
-
-
-nokey, nk
-If set to 1 specify not to print the key of each field. Default value
-is 0.
-
-
-noprint_wrappers, nw
-If set to 1 specify not to print the section header and footer.
-Default value is 0.
-
-
-
-
-
4.2 compact, csv# TOC
-
Compact and CSV format.
-
-
The csv writer is equivalent to compact, but supports
-different defaults.
-
-
Each section is printed on a single line.
-If no option is specified, the output has the form:
-
-
section|key1=val1| ... |keyN=valN
-
-
-
Metadata tags are printed in the corresponding "format" or "stream"
-section. A metadata tag key, if printed, is prefixed by the string
-"tag:".
-
-
The description of the accepted options follows.
-
-
-item_sep, s
-Specify the character to use for separating fields in the output line.
-It must be a single printable character, it is "|" by default ("," for
-the csv
writer).
-
-
-nokey, nk
-If set to 1 specify not to print the key of each field. Its default
-value is 0 (1 for the csv
writer).
-
-
-escape, e
-Set the escape mode to use, default to "c" ("csv" for the csv
-writer).
-
-It can assume one of the following values:
-
-c
-Perform C-like escaping. Strings containing a newline (’\n’), carriage
-return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping
-character (’\’) or the item separator character SEP are escaped using
-C-style escaping, so that a newline is converted to the sequence "\n", a
-carriage return to "\r", ’\’ to "\\" and the separator SEP is
-converted to "\SEP".
-
-
-csv
-Perform CSV-like escaping, as described in RFC4180. Strings
-containing a newline (’\n’), a carriage return (’\r’), a double quote
-(’"’), or SEP are enclosed in double-quotes.
-
-
-none
-Perform no escaping.
-
-
-
-
-print_section, p
-Print the section name at the beginning of each line if the value is
-1, disable it with value set to 0. Default value is 1.
-
-
-
-
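-For illustration only (not part of the original text; "input.mp4" is a
-placeholder file name), the compact writer could be selected through
-the -of/-print_format option with no section name and no keys:
-
-ffprobe -of compact=p=0:nk=1 -show_entries stream=index,codec_name input.mp4
-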
-
-
4.3 flat# TOC
-
Flat format.
-
-
A free-form output where each line contains an explicit key=value, such as
-"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
-directly embedded in sh scripts as long as the separator character is an
-alphanumeric character or an underscore (see sep_char option).
-
-
The description of the accepted options follows.
-
-
-sep_char, s
-Separator character used to separate the chapter, the section name, IDs and
-potential tags in the printed field key.
-
-Default value is ’.’.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
-
4.4 ini# TOC
-
INI format output.
-
-
Print output in an INI based format.
-
-
The following conventions are adopted:
-
-
- all key and values are UTF-8
- ’.’ is the subgroup separator
- newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped
- ’\’ is the escape character
- ’#’ is the comment indicator
- ’=’ is the key/value separator
- ’:’ is not used but usually parsed as key/value separator
-
-
-
This writer accepts options as a list of key =value pairs,
-separated by ":".
-
-
The description of the accepted options follows.
-
-
-hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-
-
-
-
-
4.5 json# TOC
-
JSON based format.
-
-
Each section is printed using JSON notation.
-
-
The description of the accepted options follows.
-
-
-compact, c
-If set to 1 enable compact output, that is each section will be
-printed on a single line. Default value is 0.
-
-
-
-
For more information about JSON, see http://www.json.org/ .
-
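-An illustrative sketch (not part of the original text; "input.mp4" is
-a placeholder file name) selecting the JSON writer in compact mode:
-
-ffprobe -of json=compact=1 -show_format input.mp4
-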
-
-
-
4.6 xml# TOC
-
XML based format.
-
-
The XML output is described in the XML schema description file
-ffprobe.xsd installed in the FFmpeg datadir.
-
-
An updated version of the schema can be retrieved at the url
-http://www.ffmpeg.org/schema/ffprobe.xsd , which redirects to the
-latest schema committed into the FFmpeg development source code tree.
-
-
Note that the output issued will be compliant to the
-ffprobe.xsd schema only when no special global output options
-(unit , prefix , byte_binary_prefix ,
-sexagesimal etc.) are specified.
-
-
The description of the accepted options follows.
-
-
-fully_qualified, q
-If set to 1 specify if the output should be fully qualified. Default
-value is 0.
-This is required for generating an XML file which can be validated
-through an XSD file.
-
-
-xsd_compliant, x
-If set to 1 perform more checks for ensuring that the output is XSD
-compliant. Default value is 0.
-This option automatically sets fully_qualified to 1.
-
-
-
-
For more information about the XML format, see
-http://www.w3.org/XML/ .
-
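-An illustrative sketch (not part of the original text; "input.mp4" is
-a placeholder file name) producing fully qualified XML output:
-
-ffprobe -of xml=fully_qualified=1 -show_streams input.mp4
-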
-
-
5 Timecode# TOC
-
-
ffprobe
supports Timecode extraction:
-
-
- MPEG1/2 timecode is extracted from the GOP, and is available in the video
-stream details (-show_streams , see timecode ).
-
- MOV timecode is extracted from tmcd track, so is available in the tmcd
-stream metadata (-show_streams , see TAG:timecode ).
-
- DV, GXF and AVI timecodes are available in format metadata
-(-show_format , see TAG:timecode ).
-
-
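-As a hedged illustration (not part of the original text; "input.avi"
-is a placeholder file name), the format-level timecode tag could be
-queried with:
-
-ffprobe -show_entries format_tags=timecode input.avi
-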
-
-
-
-
6 See Also# TOC
-
-
ffprobe-all ,
-ffmpeg , ffplay , ffserver ,
-ffmpeg-utils ,
-ffmpeg-scaler ,
-ffmpeg-resampler ,
-ffmpeg-codecs ,
-ffmpeg-bitstream-filters ,
-ffmpeg-formats ,
-ffmpeg-devices ,
-ffmpeg-protocols ,
-ffmpeg-filters
-
-
-
-
7 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/general.html b/Externals/ffmpeg/shared/doc/general.html
deleted file mode 100644
index df45ecabca..0000000000
--- a/Externals/ffmpeg/shared/doc/general.html
+++ /dev/null
@@ -1,986 +0,0 @@
-
-
-
-
-
-
- General Documentation
-
-
-
-
-
-
-
-
- General Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 External libraries# TOC
-
-
FFmpeg can be hooked up with a number of external libraries to add support
-for more formats. None of them are used by default, their use has to be
-explicitly requested by passing the appropriate flags to
-./configure.
-
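-As a hedged illustration (not part of the original text; the exact
-flag set depends on which libraries you actually want and on their
-licenses), a configure invocation enabling a few of them could look
-like:
-
-./configure --enable-gpl --enable-libx264 --enable-libmp3lame
-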
-
-
1.1 OpenJPEG# TOC
-
-
FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to
-http://www.openjpeg.org/ to get the libraries and follow the installation
-instructions. To enable using OpenJPEG in FFmpeg, pass --enable-libopenjpeg
to
-./configure .
-
-
-
-
1.2 OpenCORE, VisualOn, and Fraunhofer libraries# TOC
-
-
Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
-libraries provide encoders for a number of audio codecs.
-
-
-
OpenCORE and VisualOn libraries are under the Apache License 2.0
-(see http://www.apache.org/licenses/LICENSE-2.0 for details), which is
-incompatible with the LGPL version 2.1 and GPL version 2. You have to
-upgrade FFmpeg’s license to LGPL version 3 (or if you have enabled
-GPL components, GPL version 3) by passing --enable-version3 to configure in
-order to use it.
-
-
-The Fraunhofer AAC library is licensed under a license incompatible with the GPL
-and is not known to be compatible with the LGPL. Therefore, you have to pass
---enable-nonfree to configure to use it.
-
-
-
1.2.1 OpenCORE AMR# TOC
-
-
FFmpeg can make use of the OpenCORE libraries for AMR-NB
-decoding/encoding and AMR-WB decoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the libraries.
-Then pass --enable-libopencore-amrnb
and/or
---enable-libopencore-amrwb
to configure to enable them.
-
-
-
1.2.2 VisualOn AAC encoder library# TOC
-
-
FFmpeg can make use of the VisualOn AACenc library for AAC encoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the library.
-Then pass --enable-libvo-aacenc
to configure to enable it.
-
-
-
1.2.3 VisualOn AMR-WB encoder library# TOC
-
-
FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the library.
-Then pass --enable-libvo-amrwbenc
to configure to enable it.
-
-
-
1.2.4 Fraunhofer AAC library# TOC
-
-
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
-
-
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
-instructions for installing the library.
-Then pass --enable-libfdk-aac
to configure to enable it.
-
-
-
1.3 LAME# TOC
-
-
FFmpeg can make use of the LAME library for MP3 encoding.
-
-
Go to http://lame.sourceforge.net/ and follow the
-instructions for installing the library.
-Then pass --enable-libmp3lame
to configure to enable it.
-
-
-
1.4 TwoLAME# TOC
-
-
FFmpeg can make use of the TwoLAME library for MP2 encoding.
-
-
Go to http://www.twolame.org/ and follow the
-instructions for installing the library.
-Then pass --enable-libtwolame
to configure to enable it.
-
-
-
1.5 libvpx# TOC
-
-
FFmpeg can make use of the libvpx library for VP8/VP9 encoding.
-
-
Go to http://www.webmproject.org/ and follow the instructions for
-installing the library. Then pass --enable-libvpx
to configure to
-enable it.
-
-
-
1.6 libwavpack# TOC
-
-
FFmpeg can make use of the libwavpack library for WavPack encoding.
-
-
Go to http://www.wavpack.com/ and follow the instructions for
-installing the library. Then pass --enable-libwavpack
to configure to
-enable it.
-
-
-
1.7 OpenH264# TOC
-
-
FFmpeg can make use of the OpenH264 library for H.264 encoding.
-
-
Go to http://www.openh264.org/ and follow the instructions for
-installing the library. Then pass --enable-libopenh264
to configure to
-enable it.
-
-
-
1.8 x264# TOC
-
-
FFmpeg can make use of the x264 library for H.264 encoding.
-
-
Go to http://www.videolan.org/developers/x264.html and follow the
-instructions for installing the library. Then pass --enable-libx264
to
-configure to enable it.
-
-
-
-
1.9 x265# TOC
-
-
FFmpeg can make use of the x265 library for HEVC encoding.
-
-
Go to http://x265.org/developers.html and follow the instructions
-for installing the library. Then pass --enable-libx265
to configure
-to enable it.
-
-
-
-
1.10 libilbc# TOC
-
-
iLBC is a narrowband speech codec that has been made freely available
-by Google as part of the WebRTC project. libilbc is a packaging friendly
-copy of the iLBC codec. FFmpeg can make use of the libilbc library for
-iLBC encoding and decoding.
-
-
Go to https://github.com/TimothyGu/libilbc and follow the instructions for
-installing the library. Then pass --enable-libilbc
to configure to
-enable it.
-
-
-
1.11 libzvbi# TOC
-
-
libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB
-teletext pages and DVB teletext subtitles.
-
-
Go to http://sourceforge.net/projects/zapping/ and follow the instructions for
-installing the library. Then pass --enable-libzvbi
to configure to
-enable it.
-
-
-
-
1.12 AviSynth# TOC
-
-
FFmpeg can read AviSynth scripts as input. To enable support, pass
---enable-avisynth
to configure. The correct headers are
-included in compat/avisynth/, which allows the user to enable support
-without needing to search for these headers themselves.
-
-
For Windows, supported AviSynth variants are
-AviSynth 2.5 or 2.6 for 32-bit builds and
-AviSynth+ 0.1 for 32-bit and 64-bit builds.
-
-
For Linux and OS X, the supported AviSynth variant is
-AvxSynth .
-
-
-
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
-with --enable-avisynth
, and the binaries will work regardless of the
-end user having AviSynth or AvxSynth installed - they’ll only need to be
-installed to use AviSynth scripts (obviously).
-
-
-
-
2 Supported File Formats, Codecs or Features# TOC
-
-
You can use the -formats and -codecs options to get an exhaustive list.
-
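-For example (illustrative, not part of the original text):
-
-ffmpeg -formats
-ffmpeg -codecs
-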
-
-
2.1 File Formats# TOC
-
-
FFmpeg supports the following file formats through the libavformat
-library:
-
-
-Name Encoding Decoding Comments
-4xm X 4X Technologies format, used in some games.
-8088flex TMV X
-ACT Voice X contains G.729 audio
-Adobe Filmstrip X X
-Audio IFF (AIFF) X X
-American Laser Games MM X Multimedia format used in games like Mad Dog McCree.
-3GPP AMR X X
-Amazing Studio Packed Animation File X Multimedia format used in game Heart Of Darkness.
-Apple HTTP Live Streaming X
-Artworx Data Format X
-ADP X Audio format used on the Nintendo Gamecube.
-AFC X Audio format used on the Nintendo Gamecube.
-ASF X X
-AST X X Audio format used on the Nintendo Wii.
-AVI X X
-AviSynth X
-AVR X Audio format used on Mac.
-AVS X Multimedia format used by the Creature Shock game.
-Beam Software SIFF X Audio and video format used in some games by Beam Software.
-Bethesda Softworks VID X Used in some games from Bethesda Softworks.
-Binary text X
-Bink X Multimedia format used by many games.
-Bitmap Brothers JV X Used in Z and Z95 games.
-Brute Force & Ignorance X Used in the game Flash Traffic: City of Angels.
-BRSTM X Audio format used on the Nintendo Wii.
-BWF X X
-CRI ADX X X Audio-only format used in console video games.
-Discworld II BMV X
-Interplay C93 X Used in the game Cyberia from Interplay.
-Delphine Software International CIN X Multimedia format used by Delphine Software games.
-CD+G X Video format used by CD+G karaoke disks
-Phantom Cine X
-Commodore CDXL X Amiga CD video format
-Core Audio Format X X Apple Core Audio Format
-CRC testing format X
-Creative Voice X X Created for the Sound Blaster Pro.
-CRYO APC X Audio format used in some games by CRYO Interactive Entertainment.
-D-Cinema audio X X
-Deluxe Paint Animation X
-DFA X This format is used in Chronomaster game
-DSD Stream File (DSF) X
-DV video X X
-DXA X This format is used in the non-Windows version of the Feeble Files
- game and different game cutscenes repacked for use with ScummVM.
-Electronic Arts cdata X
-Electronic Arts Multimedia X Used in various EA games; files have extensions like WVE and UV2.
-Ensoniq Paris Audio File X
-FFM (FFserver live feed) X X
-Flash (SWF) X X
-Flash 9 (AVM2) X X Only embedded audio is decoded.
-FLI/FLC/FLX animation X .fli/.flc files
-Flash Video (FLV) X X Macromedia Flash video files
-framecrc testing format X
-FunCom ISS X Audio format used in various games from FunCom like The Longest Journey.
-G.723.1 X X
-G.729 BIT X X
-G.729 raw X
-GIF Animation X X
-GXF X X General eXchange Format SMPTE 360M, used by Thomson Grass Valley
- playout servers.
-HNM X Only version 4 supported, used in some games from Cryo Interactive
-iCEDraw File X
-ICO X X Microsoft Windows ICO
-id Quake II CIN video X
-id RoQ X X Used in Quake III, Jedi Knight 2 and other computer games.
-IEC61937 encapsulation X X
-IFF X Interchange File Format
-iLBC X X
-Interplay MVE X Format used in various Interplay computer games.
-IV8 X A format generated by IndigoVision 8000 video server.
-IVF (On2) X X A format used by libvpx
-IRCAM X X
-LATM X X
-LMLM4 X Used by Linux Media Labs MPEG-4 PCI boards
-LOAS X contains LATM multiplexed AAC audio
-LRC X X
-LVF X
-LXF X VR native stream format, used by Leitch/Harris’ video servers.
-Magic Lantern Video (MLV) X
-Matroska X X
-Matroska audio X
-FFmpeg metadata X X Metadata in text format.
-MAXIS XA X Used in Sim City 3000; file extension .xa.
-MD Studio X
-Metal Gear Solid: The Twin Snakes X
-Megalux Frame X Used by Megalux Ultimate Paint
-Mobotix .mxg X
-Monkey’s Audio X
-Motion Pixels MVI X
-MOV/QuickTime/MP4 X X 3GP, 3GP2, PSP, iPod variants supported
-MP2 X X
-MP3 X X
-MPEG-1 System X X muxed audio and video, VCD format supported
-MPEG-PS (program stream) X X also known as VOB
file, SVCD and DVD format supported
-MPEG-TS (transport stream) X X also known as DVB Transport Stream
-MPEG-4 X X MPEG-4 is a variant of QuickTime.
-Mirillis FIC video X No cursor rendering.
-MIME multipart JPEG X
-MSN TCP webcam X Used by MSN Messenger webcam streams.
-MTV X
-Musepack X
-Musepack SV8 X
-Material eXchange Format (MXF) X X SMPTE 377M, used by D-Cinema, broadcast industry.
-Material eXchange Format (MXF), D-10 Mapping X X SMPTE 386M, D-10/IMX Mapping.
-NC camera feed X NC (AVIP NC4600) camera streams
-NIST SPeech HEader REsources X
-NTT TwinVQ (VQF) X Nippon Telegraph and Telephone Corporation TwinVQ.
-Nullsoft Streaming Video X
-NuppelVideo X
-NUT X X NUT Open Container Format
-Ogg X X
-Playstation Portable PMP X
-Portable Voice Format X
-TechnoTrend PVA X Used by TechnoTrend DVB PCI boards.
-QCP X
-raw ADTS (AAC) X X
-raw AC-3 X X
-raw Chinese AVS video X X
-raw CRI ADX X X
-raw Dirac X X
-raw DNxHD X X
-raw DTS X X
-raw DTS-HD X
-raw E-AC-3 X X
-raw FLAC X X
-raw GSM X
-raw H.261 X X
-raw H.263 X X
-raw H.264 X X
-raw HEVC X X
-raw Ingenient MJPEG X
-raw MJPEG X X
-raw MLP X
-raw MPEG X
-raw MPEG-1 X
-raw MPEG-2 X
-raw MPEG-4 X X
-raw NULL X
-raw video X X
-raw id RoQ X
-raw Shorten X
-raw TAK X
-raw TrueHD X X
-raw VC-1 X X
-raw PCM A-law X X
-raw PCM mu-law X X
-raw PCM signed 8 bit X X
-raw PCM signed 16 bit big-endian X X
-raw PCM signed 16 bit little-endian X X
-raw PCM signed 24 bit big-endian X X
-raw PCM signed 24 bit little-endian X X
-raw PCM signed 32 bit big-endian X X
-raw PCM signed 32 bit little-endian X X
-raw PCM unsigned 8 bit X X
-raw PCM unsigned 16 bit big-endian X X
-raw PCM unsigned 16 bit little-endian X X
-raw PCM unsigned 24 bit big-endian X X
-raw PCM unsigned 24 bit little-endian X X
-raw PCM unsigned 32 bit big-endian X X
-raw PCM unsigned 32 bit little-endian X X
-raw PCM floating-point 32 bit big-endian X X
-raw PCM floating-point 32 bit little-endian X X
-raw PCM floating-point 64 bit big-endian X X
-raw PCM floating-point 64 bit little-endian X X
-RDT X
-REDCODE R3D X File format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio.
-RealMedia X X
-Redirector X
-RedSpark X
-Renderware TeXture Dictionary X
-RL2 X Audio and video format used in some games by Entertainment Software Partners.
-RPL/ARMovie X
-Lego Mindstorms RSO X X
-RSD X
-RTMP X X Output is performed by publishing stream to RTMP server
-RTP X X
-RTSP X X
-SAP X X
-SBG X
-SDP X
-Sega FILM/CPK X Used in many Sega Saturn console games.
-Silicon Graphics Movie X
-Sierra SOL X .sol files used in Sierra Online games.
-Sierra VMD X Used in Sierra CD-ROM games.
-Smacker X Multimedia format used by many games.
-SMJPEG X X Used in certain Loki game ports.
-Smush X Multimedia format used in some LucasArts games.
-Sony OpenMG (OMA) X X Audio format used in Sony Sonic Stage and Sony Vegas.
-Sony PlayStation STR X
-Sony Wave64 (W64) X X
-SoX native format X X
-SUN AU format X X
-SUP raw PGS subtitles X
-Text files X
-THP X Used on the Nintendo GameCube.
-Tiertex Limited SEQ X Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
-True Audio X
-VC-1 test bitstream X X
-Vivo X
-WAV X X
-WavPack X X
-WebM X X
-Windows Television (WTV) X X
-Wing Commander III movie X Multimedia format used in Origin’s Wing Commander III computer game.
-Westwood Studios audio X Multimedia format used in Westwood Studios games.
-Westwood Studios VQA X Multimedia format used in Westwood Studios games.
-XMV X Microsoft video container used in Xbox games.
-xWMA X Microsoft audio container used by XAudio 2.
-eXtended BINary text (XBIN) X
-YUV4MPEG pipe X X
-Psygnosis YOP X
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
-
2.2 Image Formats# TOC
-
-
FFmpeg can read and write images for each frame of a video sequence. The
-following image formats are supported:
-
-
-Name Encoding Decoding Comments
-.Y.U.V X X one raw file per component
-Alias PIX X X Alias/Wavefront PIX image format
-animated GIF X X
-BMP X X Microsoft BMP image
-BRender PIX X Argonaut BRender 3D engine image format.
-DPX X X Digital Picture Exchange
-EXR X OpenEXR
-JPEG X X Progressive JPEG is not supported.
-JPEG 2000 X X
-JPEG-LS X X
-LJPEG X Lossless JPEG
-PAM X X PAM is a PNM extension with alpha support.
-PBM X X Portable BitMap image
-PCX X X PC Paintbrush
-PGM X X Portable GrayMap image
-PGMYUV X X PGM with U and V components in YUV 4:2:0
-PIC X Pictor/PC Paint
-PNG X X
-PPM X X Portable PixelMap image
-PTX X V.Flash PTX format
-SGI X X SGI RGB image format
-Sun Rasterfile X X Sun RAS image format
-TIFF X X YUV, JPEG and some extensions are not supported yet.
-Truevision Targa X X Targa (.TGA) image format
-WebP E X WebP image format, encoding supported through external library libwebp
-XBM X X X BitMap image format
-XFace X X X-Face image format
-XWD X X X Window Dump image format
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
E
means that support is provided through an external library.
-
-
-
2.3 Video Codecs# TOC
-
-
-Name Encoding Decoding Comments
-4X Movie X Used in certain computer games.
-8088flex TMV X
-A64 multicolor X Creates video suitable to be played on a commodore 64 (multicolor mode).
-Amazing Studio PAF Video X
-American Laser Games MM X Used in games like Mad Dog McCree.
-AMV Video X X Used in Chinese MP3 players.
-ANSI/ASCII art X
-Apple Intermediate Codec X
-Apple MJPEG-B X
-Apple ProRes X X
-Apple QuickDraw X fourcc: qdrw
-Asus v1 X X fourcc: ASV1
-Asus v2 X X fourcc: ASV2
-ATI VCR1 X fourcc: VCR1
-ATI VCR2 X fourcc: VCR2
-Auravision Aura X
-Auravision Aura 2 X
-Autodesk Animator Flic video X
-Autodesk RLE X fourcc: AASC
-Avid 1:1 10-bit RGB Packer X X fourcc: AVrp
-AVS (Audio Video Standard) video X Video encoding used by the Creature Shock game.
-AYUV X X Microsoft uncompressed packed 4:4:4:4
-Beam Software VB X
-Bethesda VID video X Used in some games from Bethesda Softworks.
-Bink Video X
-Bitmap Brothers JV video X
-y41p Brooktree uncompressed 4:1:1 12-bit X X
-Brute Force & Ignorance X Used in the game Flash Traffic: City of Angels.
-C93 video X Codec used in Cyberia game.
-CamStudio X fourcc: CSCD
-CD+G X Video codec for CD+G karaoke disks
-CDXL X Amiga CD video codec
-Chinese AVS video E X AVS1-P2, JiZhun profile, encoding through external library libxavs
-Delphine Software International CIN video X Codec used in Delphine Software International games.
-Discworld II BMV Video X
-Canopus Lossless Codec X
-Cinepak X
-Cirrus Logic AccuPak X X fourcc: CLJR
-CPiA Video Format X
-Creative YUV (CYUV) X
-DFA X Codec used in Chronomaster game.
-Dirac E X supported through external library libschroedinger
-Deluxe Paint Animation X
-DNxHD X X aka SMPTE VC3
-Duck TrueMotion 1.0 X fourcc: DUCK
-Duck TrueMotion 2.0 X fourcc: TM20
-DV (Digital Video) X X
-Dxtory capture format X
-Feeble Files/ScummVM DXA X Codec originally used in Feeble Files game.
-Electronic Arts CMV video X Used in NHL 95 game.
-Electronic Arts Madcow video X
-Electronic Arts TGV video X
-Electronic Arts TGQ video X
-Electronic Arts TQI video X
-Escape 124 X
-Escape 130 X
-FFmpeg video codec #1 X X lossless codec (fourcc: FFV1)
-Flash Screen Video v1 X X fourcc: FSV1
-Flash Screen Video v2 X X
-Flash Video (FLV) X X Sorenson H.263 used in Flash
-Forward Uncompressed X
-Fraps X
-Go2Webinar X fourcc: G2M4
-H.261 X X
-H.263 / H.263-1996 X X
-H.263+ / H.263-1998 / H.263 version 2 X X
-H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 E X encoding supported through external library libx264 and OpenH264
-HEVC X X encoding supported through the external library libx265
-HNM version 4 X
-HuffYUV X X
-HuffYUV FFmpeg variant X X
-IBM Ultimotion X fourcc: ULTI
-id Cinematic video X Used in Quake II.
-id RoQ video X X Used in Quake III, Jedi Knight 2, other computer games.
-IFF ILBM X IFF interleaved bitmap
-IFF ByteRun1 X IFF run length encoded bitmap
-Intel H.263 X
-Intel Indeo 2 X
-Intel Indeo 3 X
-Intel Indeo 4 X
-Intel Indeo 5 X
-Interplay C93 X Used in the game Cyberia from Interplay.
-Interplay MVE video X Used in Interplay .MVE files.
-J2K X X
-Karl Morton’s video codec X Codec used in Worms games.
-Kega Game Video (KGV1) X Kega emulator screen capture codec.
-Lagarith X
-LCL (LossLess Codec Library) MSZH X
-LCL (LossLess Codec Library) ZLIB E E
-LOCO X
-LucasArts SANM/Smush X Used in LucasArts games / SMUSH animations.
-lossless MJPEG X X
-Microsoft ATC Screen X Also known as Microsoft Screen 3.
-Microsoft Expression Encoder Screen X Also known as Microsoft Titanium Screen 2.
-Microsoft RLE X
-Microsoft Screen 1 X Also known as Windows Media Video V7 Screen.
-Microsoft Screen 2 X Also known as Windows Media Video V9 Screen.
-Microsoft Video 1 X
-Mimic X Used in MSN Messenger Webcam streams.
-Miro VideoXL X fourcc: VIXL
-MJPEG (Motion JPEG) X X
-Mobotix MxPEG video X
-Motion Pixels video X
-MPEG-1 video X X
-MPEG-2 video X X
-MPEG-4 part 2 X X libxvidcore can be used alternatively for encoding.
-MPEG-4 part 2 Microsoft variant version 1 X
-MPEG-4 part 2 Microsoft variant version 2 X X
-MPEG-4 part 2 Microsoft variant version 3 X X
-Nintendo Gamecube THP video X
-NuppelVideo/RTjpeg X Video encoding used in NuppelVideo files.
-On2 VP3 X still experimental
-On2 VP5 X fourcc: VP50
-On2 VP6 X fourcc: VP60,VP61,VP62
-On2 VP7 X fourcc: VP70,VP71
-VP8 E X fourcc: VP80, encoding supported through external library libvpx
-VP9 E X encoding supported through external library libvpx
-Pinnacle TARGA CineWave YUV16 X fourcc: Y216
-Prores X fourcc: apch,apcn,apcs,apco
-Q-team QPEG X fourccs: QPEG, Q1.0, Q1.1
-QuickTime 8BPS video X
-QuickTime Animation (RLE) video X X fourcc: ’rle ’
-QuickTime Graphics (SMC) X fourcc: ’smc ’
-QuickTime video (RPZA) X fourcc: rpza
-R10K AJA Kona 10-bit RGB Codec X X
-R210 Quicktime Uncompressed RGB 10-bit X X
-Raw Video X X
-RealVideo 1.0 X X
-RealVideo 2.0 X X
-RealVideo 3.0 X still far from ideal
-RealVideo 4.0 X
-Renderware TXD (TeXture Dictionary) X Texture dictionaries used by the Renderware Engine.
-RL2 video X used in some games by Entertainment Software Partners
-Sierra VMD video X Used in Sierra VMD files.
-Silicon Graphics Motion Video Compressor 1 (MVC1) X
-Silicon Graphics Motion Video Compressor 2 (MVC2) X
-Silicon Graphics RLE 8-bit video X
-Smacker video X Video encoding used in Smacker.
-SMPTE VC-1 X
-Snow X X experimental wavelet codec (fourcc: SNOW)
-Sony PlayStation MDEC (Motion DECoder) X
-Sorenson Vector Quantizer 1 X X fourcc: SVQ1
-Sorenson Vector Quantizer 3 X fourcc: SVQ3
-Sunplus JPEG (SP5X) X fourcc: SP5X
-TechSmith Screen Capture Codec X fourcc: TSCC
-TechSmith Screen Capture Codec 2 X fourcc: TSC2
-Theora E X encoding supported through external library libtheora
-Tiertex Limited SEQ video X Codec used in DOS CD-ROM FlashBack game.
-Ut Video X X
-v210 QuickTime uncompressed 4:2:2 10-bit X X
-v308 QuickTime uncompressed 4:4:4 X X
-v408 QuickTime uncompressed 4:4:4:4 X X
-v410 QuickTime uncompressed 4:4:4 10-bit X X
-VBLE Lossless Codec X
-VMware Screen Codec / VMware Video X Codec used in videos captured by VMware.
-Westwood Studios VQA (Vector Quantized Animation) video X
-Windows Media Image X
-Windows Media Video 7 X X
-Windows Media Video 8 X X
-Windows Media Video 9 X not completely working
-Wing Commander III / Xan X Used in Wing Commander III .MVE files.
-Wing Commander IV / Xan X Used in Wing Commander IV.
-Winnov WNV1 X
-WMV7 X X
-YAMAHA SMAF X X
-Psygnosis YOP Video X
-yuv4 X X libquicktime uncompressed packed 4:2:0
-ZeroCodec Lossless Video X
-ZLIB X X part of LCL, encoder experimental
-Zip Motion Blocks Video X X Encoder works only in PAL8.
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
E
means that support is provided through an external library.
-
-
-
2.4 Audio Codecs# TOC
-
-
-Name Encoding Decoding Comments
-8SVX exponential X
-8SVX fibonacci X
-AAC+ E X encoding supported through external library libaacplus
-AAC E X encoding supported through external library libfaac and libvo-aacenc
-AC-3 IX IX
-ADPCM 4X Movie X
-ADPCM CDROM XA X
-ADPCM Creative Technology X 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
-ADPCM Electronic Arts X Used in various EA titles.
-ADPCM Electronic Arts Maxis CDROM XS X Used in Sim City 3000.
-ADPCM Electronic Arts R1 X
-ADPCM Electronic Arts R2 X
-ADPCM Electronic Arts R3 X
-ADPCM Electronic Arts XAS X
-ADPCM G.722 X X
-ADPCM G.726 X X
-ADPCM IMA AMV X Used in AMV files
-ADPCM IMA Electronic Arts EACS X
-ADPCM IMA Electronic Arts SEAD X
-ADPCM IMA Funcom X
-ADPCM IMA QuickTime X X
-ADPCM IMA Loki SDL MJPEG X
-ADPCM IMA WAV X X
-ADPCM IMA Westwood X
-ADPCM ISS IMA X Used in FunCom games.
-ADPCM IMA Dialogic X
-ADPCM IMA Duck DK3 X Used in some Sega Saturn console games.
-ADPCM IMA Duck DK4 X Used in some Sega Saturn console games.
-ADPCM IMA Radical X
-ADPCM Microsoft X X
-ADPCM MS IMA X X
-ADPCM Nintendo Gamecube AFC X
-ADPCM Nintendo Gamecube DTK X
-ADPCM Nintendo Gamecube THP X
-ADPCM QT IMA X X
-ADPCM SEGA CRI ADX X X Used in Sega Dreamcast games.
-ADPCM Shockwave Flash X X
-ADPCM Sound Blaster Pro 2-bit X
-ADPCM Sound Blaster Pro 2.6-bit X
-ADPCM Sound Blaster Pro 4-bit X
-ADPCM VIMA Used in LucasArts SMUSH animations.
-ADPCM Westwood Studios IMA X Used in Westwood Studios games like Command and Conquer.
-ADPCM Yamaha X X
-AMR-NB E X encoding supported through external library libopencore-amrnb
-AMR-WB E X encoding supported through external library libvo-amrwbenc
-Amazing Studio PAF Audio X
-Apple lossless audio X X QuickTime fourcc ’alac’
-ATRAC1 X
-ATRAC3 X
-ATRAC3+ X
-Bink Audio X Used in Bink and Smacker files in many games.
-CELT E decoding supported through external library libcelt
-Delphine Software International CIN audio X Codec used in Delphine Software International games.
-Discworld II BMV Audio X
-COOK X All versions except 5.1 are supported.
-DCA (DTS Coherent Acoustics) X X
-DPCM id RoQ X X Used in Quake III, Jedi Knight 2 and other computer games.
-DPCM Interplay X Used in various Interplay computer games.
-DPCM Sierra Online X Used in Sierra Online game audio files.
-DPCM Sol X
-DPCM Xan X Used in Origin’s Wing Commander IV AVI files.
-DSD (Direct Stream Digital), least significant bit first X
-DSD (Direct Stream Digital), most significant bit first X
-DSD (Direct Stream Digital), least significant bit first, planar X
-DSD (Direct Stream Digital), most significant bit first, planar X
-DSP Group TrueSpeech X
-DV audio X
-Enhanced AC-3 X X
-EVRC (Enhanced Variable Rate Codec) X
-FLAC (Free Lossless Audio Codec) X IX
-G.723.1 X X
-G.729 X
-GSM E X encoding supported through external library libgsm
-GSM Microsoft variant E X encoding supported through external library libgsm
-IAC (Indeo Audio Coder) X
-iLBC (Internet Low Bitrate Codec) E E encoding and decoding supported through external library libilbc
-IMC (Intel Music Coder) X
-MACE (Macintosh Audio Compression/Expansion) 3:1 X
-MACE (Macintosh Audio Compression/Expansion) 6:1 X
-MLP (Meridian Lossless Packing) X Used in DVD-Audio discs.
-Monkey’s Audio X
-MP1 (MPEG audio layer 1) IX
-MP2 (MPEG audio layer 2) IX IX encoding supported also through external library TwoLAME
-MP3 (MPEG audio layer 3) E IX encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
-MPEG-4 Audio Lossless Coding (ALS) X
-Musepack SV7 X
-Musepack SV8 X
-Nellymoser Asao X X
-On2 AVC (Audio for Video Codec) X
-Opus E E supported through external library libopus
-PCM A-law X X
-PCM mu-law X X
-PCM signed 8-bit planar X X
-PCM signed 16-bit big-endian planar X X
-PCM signed 16-bit little-endian planar X X
-PCM signed 24-bit little-endian planar X X
-PCM signed 32-bit little-endian planar X X
-PCM 32-bit floating point big-endian X X
-PCM 32-bit floating point little-endian X X
-PCM 64-bit floating point big-endian X X
-PCM 64-bit floating point little-endian X X
-PCM D-Cinema audio signed 24-bit X X
-PCM signed 8-bit X X
-PCM signed 16-bit big-endian X X
-PCM signed 16-bit little-endian X X
-PCM signed 24-bit big-endian X X
-PCM signed 24-bit little-endian X X
-PCM signed 32-bit big-endian X X
-PCM signed 32-bit little-endian X X
-PCM signed 16/20/24-bit big-endian in MPEG-TS X
-PCM unsigned 8-bit X X
-PCM unsigned 16-bit big-endian X X
-PCM unsigned 16-bit little-endian X X
-PCM unsigned 24-bit big-endian X X
-PCM unsigned 24-bit little-endian X X
-PCM unsigned 32-bit big-endian X X
-PCM unsigned 32-bit little-endian X X
-PCM Zork X
-QCELP / PureVoice X
-QDesign Music Codec 2 X There are still some distortions.
-RealAudio 1.0 (14.4K) X X Real 14400 bit/s codec
-RealAudio 2.0 (28.8K) X Real 28800 bit/s codec
-RealAudio 3.0 (dnet) IX X Real low bitrate AC-3 codec
-RealAudio Lossless X
-RealAudio SIPR / ACELP.NET X
-Shorten X
-Sierra VMD audio X Used in Sierra VMD files.
-Smacker audio X
-SMPTE 302M AES3 audio X X
-Sonic X X experimental codec
-Sonic lossless X X experimental codec
-Speex E E supported through external library libspeex
-TAK (Tom’s lossless Audio Kompressor) X
-True Audio (TTA) X X
-TrueHD X Used in HD-DVD and Blu-Ray discs.
-TwinVQ (VQF flavor) X
-VIMA X Used in LucasArts SMUSH animations.
-Vorbis E X A native but very primitive encoder exists.
-Voxware MetaSound X
-WavPack X X
-Westwood Audio (SND1) X
-Windows Media Audio 1 X X
-Windows Media Audio 2 X X
-Windows Media Audio Lossless X
-Windows Media Audio Pro X
-Windows Media Audio Voice X
-
-
-
X
means that encoding (resp. decoding) is supported.
-
-
E
means that support is provided through an external library.
-
-
I
means that an integer-only version is available, too (ensures high
-performance on systems without hardware floating point support).
-
-
-
2.5 Subtitle Formats# TOC
-
-
-Name Muxing Demuxing Encoding Decoding
-3GPP Timed Text X X
-AQTitle X X
-DVB X X X X
-DVB teletext X E
-DVD X X X X
-JACOsub X X X
-MicroDVD X X X
-MPL2 X X
-MPsub (MPlayer) X X
-PGS X
-PJS (Phoenix) X X
-RealText X X
-SAMI X X
-Spruce format (STL) X X
-SSA/ASS X X X X
-SubRip (SRT) X X X X
-SubViewer v1 X X
-SubViewer X X
-TED Talks captions X X
-VobSub (IDX+SUB) X X
-VPlayer X X
-WebVTT X X X X
-XSUB X X
-
-
-
X
means that the feature is supported.
-
-
E
means that support is provided through an external library.
-
-
-
2.6 Network Protocols# TOC
-
-
-Name Support
-file X
-FTP X
-Gopher X
-HLS X
-HTTP X
-HTTPS X
-Icecast X
-MMSH X
-MMST X
-pipe X
-RTMP X
-RTMPE X
-RTMPS X
-RTMPT X
-RTMPTE X
-RTMPTS X
-RTP X
-SAMBA E
-SCTP X
-SFTP E
-TCP X
-TLS X
-UDP X
-
-
-
X
means that the protocol is supported.
-
-
E
means that support is provided through an external library.
-
-
-
-
2.7 Input/Output Devices# TOC
-
-
-Name Input Output
-ALSA X X
-BKTR X
-caca X
-DV1394 X
-Lavfi virtual device X
-Linux framebuffer X X
-JACK X
-LIBCDIO X
-LIBDC1394 X
-OpenAL X
-OpenGL X
-OSS X X
-PulseAudio X X
-SDL X
-Video4Linux2 X X
-VfW capture X
-X11 grabbing X
-Win32 grabbing X
-
-
-
X
means that input/output is supported.
-
-
-
2.8 Timecode# TOC
-
-
-Codec/format Read Write
-AVI X X
-DV X X
-GXF X X
-MOV X X
-MPEG1/2 X X
-MXF X X
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/git-howto.html b/Externals/ffmpeg/shared/doc/git-howto.html
deleted file mode 100644
index dbbc681f4e..0000000000
--- a/Externals/ffmpeg/shared/doc/git-howto.html
+++ /dev/null
@@ -1,493 +0,0 @@
-
-
-
-
-
-
- Using git to develop FFmpeg
-
-
-
-
-
-
-
-
- Using git to develop FFmpeg
-
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Introduction# TOC
-
-
This document aims to give a quick reference for a set of useful git
-commands. You should always use the extensive and detailed documentation
-provided directly by git:
-
-
-
-
shows you the available subcommands,
-
-
-
git <command> --help
-man git-<command>
-
-
-
shows information about the subcommand <command>.
-
-
Additional information could be found on the
-Git Reference website
-
-
For more information about the Git project, visit the
-
-
Git website
-
-
Consult these resources whenever you have problems, they are quite exhaustive.
-
-
What follows now is a basic introduction to Git and some FFmpeg-specific
-guidelines to ease contribution to the project.
-
-
-
2 Basic Usage# TOC
-
-
-
2.1 Get GIT# TOC
-
-
You can get git from http://git-scm.com/
-Most distributions and operating systems provide a package for it.
-
-
-
-
2.2 Cloning the source tree# TOC
-
-
-
git clone git://source.ffmpeg.org/ffmpeg <target>
-
-
-
This will put the FFmpeg sources into the directory <target> .
-
-
-
git clone git@source.ffmpeg.org:ffmpeg <target>
-
-
-
This will put the FFmpeg sources into the directory <target> and let
-you push back your changes to the remote repository.
-
-
Make sure that you do not have Windows line endings in your checkouts,
-otherwise you may experience spurious compilation failures. One way to
-achieve this is to run
-
-
-
git config --global core.autocrlf false
-
-
-
-
-
2.3 Updating the source tree to the latest revision# TOC
-
-
-
-git pull
pulls in the latest changes from the tracked branch. The tracked branch
-can be remote. By default the master branch tracks the branch master in
-the remote origin.
-
-
-
--rebase
(see below) is recommended.
-
-
-
2.4 Rebasing your local branches# TOC
-
-
-
-git pull --rebase
fetches the changes from the main repository and replays your local commits
-over it. This is required to keep all your local changes at the top of
-FFmpeg’s master tree. The master tree will reject pushes with merge commits.
-
-
-
-
2.5 Adding/removing files/directories# TOC
-
-
-
git add [-A] <filename/dirname>
-git rm [-r] <filename/dirname>
-
-
-
GIT needs to get notified of all changes you make to your working
-directory that make files appear or disappear.
-Line moves across files are automatically tracked.
-
-
-
-
2.6 Showing modifications# TOC
-
-
-
git diff <filename(s)>
-
-
-
will show all local modifications in your working directory as unified diff.
-
-
-
-
2.7 Inspecting the changelog# TOC
-
-
-git log <filename(s)>
-
You may also use the graphical tools like gitview or gitk or the web
-interface available at http://source.ffmpeg.org/
-
-
-
2.8 Checking source tree status# TOC
-
-
-git status
-
detects all the changes you made and lists what actions will be taken in case
-of a commit (additions, modifications, deletions, etc.).
-
-
-
-
2.9 Committing# TOC
-
-
-git diff
-
to double check your changes before committing them to avoid trouble later
-on. All experienced developers do this on each and every commit, no matter
-how small.
-Every one of them has been saved from looking like a fool by this many times.
-It’s very easy for stray debug output or cosmetic modifications to slip in,
-please avoid problems through this extra level of scrutiny.
-
-
For cosmetics-only commits you should get (almost) empty output from
-
-
-
git diff -w -b <filename(s)>
-
-
-
Also check the output of
-
-
-git status
-
to make sure you don’t have untracked files or deletions.
-
-
-
git add [-i|-p|-A] <filenames/dirnames>
-
-
-
Make sure you have told git your name and email address
-
-
-
git config --global user.name "My Name"
-git config --global user.email my@email.invalid
-
-
-
Use --global to set the global configuration for all your git checkouts.
-
-
Git will select the changes to the files for commit. Optionally you can use
-the interactive or the patch mode to select hunk by hunk what should be
-added to the commit.
-
-
-
-git commit
-
Git will commit the selected changes to your current local branch.
-
-
You will be prompted for a log message in an editor, which is either
-set in your personal configuration file through
-
-
-
git config --global core.editor
-
-
-
or set by one of the following environment variables:
-GIT_EDITOR , VISUAL or EDITOR .
-
-
Log messages should be concise but descriptive. Explain why you made a change,
-what you did will be obvious from the changes themselves most of the time.
-Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
-levels look at and educate themselves while reading through your code. Don’t
-include filenames in log messages, Git provides that information.
-
-
Possibly make the commit message have a terse, descriptive first line, an
-empty line and then a full description. The first line will be used to name
-the patch by git format-patch.
-
-
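-A purely invented sketch of a message following this convention (the
-component name and wording are illustrative, not taken from the
-project history):
-
-lavf/somedemuxer: fix leak when reading the header fails
-
-Free the private context before returning the error, so repeated open
-attempts do not accumulate allocations.
-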
-
2.10 Preparing a patchset# TOC
-
-
-
git format-patch <commit> [-o directory]
-
-
-
will generate a set of patches for each commit between <commit> and
-current HEAD . E.g.
-
-
-
git format-patch origin/master
-
-
-
will generate patches for all commits on current branch which are not
-present in upstream.
-A useful shortcut is also
-
-
-git format-patch -n
-
which will generate patches from last n commits.
-By default the patches are created in the current directory.
-
-
-
2.11 Sending patches for review# TOC
-
-
-
git send-email <commit list|directory>
-
-
-
will send the patches created by git format-patch
or directly
-generates them. All the email fields can be configured in the global/local
-configuration or overridden by command line.
-Note that this tool must often be installed separately (e.g. git-email
-package on Debian-based distros).
-
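-An illustrative invocation (not part of the original text; "outgoing/"
-is a placeholder for wherever git format-patch wrote the patches):
-
-git send-email --to=ffmpeg-devel@ffmpeg.org outgoing/
-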
-
-
-
2.12 Renaming/moving/copying files or contents of files# TOC
-
-
Git automatically tracks such changes, making those normal commits.
-
-
-
mv/cp path/file otherpath/otherfile
-git add [-A] .
-git commit
-
-
-
-
-
3 Git configuration# TOC
-
-
In order to simplify a few workflows, it is advisable to configure both
-your personal Git installation and your local FFmpeg repository.
-
-
-
3.1 Personal Git installation# TOC
-
-
Add the following to your ~/.gitconfig to help git send-email
-and git format-patch
detect renames:
-
-
-
[diff]
- renames = copy
-
-
-
-
3.2 Repository configuration# TOC
-
-
In order to have git send-email
automatically send patches
-to the ffmpeg-devel mailing list, add the following stanza
-to /path/to/ffmpeg/repository/.git/config :
-
-
-
[sendemail]
- to = ffmpeg-devel@ffmpeg.org
-
-
-
-
4 FFmpeg specific# TOC
-
-
-
4.1 Reverting broken commits# TOC
-
-
-
-git reset <commit>
git reset
will uncommit the changes till <commit> rewriting
-the current branch history.
-
-
-
-git commit --amend
allows one to amend the last commit details quickly.
-
-
-
git rebase -i origin/master
-
-
-
-will replay local commits over the main repository allowing you to edit, merge
-or remove some of them in the process.
-
-
-
git reset
, git commit --amend
and git rebase
-rewrite history, so you should use them ONLY on your local or topic branches.
-The main repository will reject those changes.
-
-
-
-git revert <commit>
git revert
will generate a revert commit. This will not make the
-faulty commit disappear from the history.
-
-
-
4.2 Pushing changes to remote trees# TOC
-
-
-git push
-
Will push the changes to the default remote (origin).
-Git will prevent you from pushing changes if the local and remote trees are
-out of sync. Refer to the sections on updating and rebasing above to sync
-the local tree.
-
-
-
git remote add <name> <url>
-
-
-
Will add an additional remote with a name reference. It is useful if you want
-to push your local branch for review on a remote host.
-
-
-
git push <remote> <refspec>
-
-
-
Will push the changes to the <remote> repository.
-Omitting <refspec> makes git push
update all the remote
-branches matching the local ones.
-
-
-
4.3 Finding a specific svn revision# TOC
-
-
Since version 1.7.1 git supports :/foo syntax for specifying commits
-based on a regular expression. see man gitrevisions
-
-
-
git show :/'as revision 23456'
-
-
-
will show the svn changeset r23456 . With older git versions searching in
-the git log
output is the easiest option (especially if a pager with
-search capabilities is used).
-This commit can be checked out with
-
-
-
git checkout -b svn_23456 :/'as revision 23456'
-
-
-
or for git < 1.7.1 with
-
-
-
git checkout -b svn_23456 $SHA1
-
-
-
where $SHA1 is the commit hash from the git log
output.
-
-
-
-
5 pre-push checklist# TOC
-
-
Once you have a set of commits that you feel are ready for pushing,
-work through the following checklist to double-check everything is in
-proper order. This list tries to be exhaustive. In case you are just
-pushing a typo in a comment, some of the steps may be unnecessary.
-Apply your common sense, but if in doubt, err on the side of caution.
-
-
First, make sure that the commits and branches you are going to push
-match what you want pushed and that nothing is missing, extraneous or
-wrong. You can see what will be pushed by running the git push command
-with --dry-run first. And then inspecting the commits listed with
-git log -p 1234567..987654
. The git status
command
-may help in finding local changes that have been forgotten to be added.
-
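-A hedged sketch of those two checks (not part of the original text;
-the branch names are whatever your local and remote branches happen to
-be):
-
-git push --dry-run origin master
-git log -p origin/master..master
-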
-
Next let the code pass through a full run of our testsuite.
-
-
- make distclean
- /path/to/ffmpeg/configure
- make check
- if fate fails due to missing samples run make fate-rsync
and retry
-
-
-
Make sure all your changes have been checked before pushing them; the
-testsuite only checks against regressions, and only to some extent. It
-obviously does not check newly added features/code to be working unless you
-have added a test for that (which is recommended).
-
-
Also note that every single commit should pass the test suite, not just
-the result of a series of patches.
-
-
Once everything passed, push the changes to your public ffmpeg clone and post a
-merge request to ffmpeg-devel. You can also push them directly but this is not
-recommended.
-
-
-
6 Server Issues# TOC
-
-
Contact the project admins root@ffmpeg.org if you have technical
-problems with the GIT server.
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libavcodec.html b/Externals/ffmpeg/shared/doc/libavcodec.html
deleted file mode 100644
index b15f00ec35..0000000000
--- a/Externals/ffmpeg/shared/doc/libavcodec.html
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-
-
-
-
- Libavcodec Documentation
-
-
-
-
-
-
-
-
- Libavcodec Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavcodec library provides a generic encoding/decoding framework
-and contains multiple decoders and encoders for audio, video and
-subtitle streams, and several bitstream filters.
-
-
The shared architecture provides various services ranging from bit
-stream I/O to DSP optimizations, and makes it suitable for
-implementing robust and fast codecs as well as for experimentation.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-codecs , bitstream-filters ,
-libavutil
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libavdevice.html b/Externals/ffmpeg/shared/doc/libavdevice.html
deleted file mode 100644
index dd0379b147..0000000000
--- a/Externals/ffmpeg/shared/doc/libavdevice.html
+++ /dev/null
@@ -1,73 +0,0 @@
-
-
-
-
-
-
- Libavdevice Documentation
-
-
-
-
-
-
-
-
- Libavdevice Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavdevice library provides a generic framework for grabbing from
-and rendering to many common multimedia input/output devices, and
-supports several input and output devices, including Video4Linux2,
-VfW, DShow, and ALSA.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-devices ,
-libavutil , libavcodec , libavformat
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libavfilter.html b/Externals/ffmpeg/shared/doc/libavfilter.html
deleted file mode 100644
index 0bfd0e598a..0000000000
--- a/Externals/ffmpeg/shared/doc/libavfilter.html
+++ /dev/null
@@ -1,72 +0,0 @@
-
-
-
-
-
-
- Libavfilter Documentation
-
-
-
-
-
-
-
-
- Libavfilter Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavfilter library provides a generic audio/video filtering
-framework containing several filters, sources and sinks.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-filters ,
-libavutil , libswscale , libswresample ,
-libavcodec , libavformat , libavdevice
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libavformat.html b/Externals/ffmpeg/shared/doc/libavformat.html
deleted file mode 100644
index 57e29c5057..0000000000
--- a/Externals/ffmpeg/shared/doc/libavformat.html
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-
-
-
-
- Libavformat Documentation
-
-
-
-
-
-
-
-
- Libavformat Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavformat library provides a generic framework for multiplexing
-and demultiplexing (muxing and demuxing) audio, video and subtitle
-streams. It encompasses multiple muxers and demuxers for multimedia
-container formats.
-
-
It also supports several input and output protocols to access a media
-resource.
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-formats , ffmpeg-protocols ,
-libavutil , libavcodec
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libavutil.html b/Externals/ffmpeg/shared/doc/libavutil.html
deleted file mode 100644
index 23e471d17a..0000000000
--- a/Externals/ffmpeg/shared/doc/libavutil.html
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
-
-
-
-
- Libavutil Documentation
-
-
-
-
-
-
-
-
- Libavutil Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libavutil library is a utility library to aid portable
-multimedia programming. It contains safe portable string functions,
-random number generators, data structures, additional mathematics
-functions, cryptography and multimedia related functionality (like
-enumerations for pixel and sample formats). It is not a library for
-code needed by both libavcodec and libavformat.
-
-
The goals for this library are to be:
-
-
-Modular
-It should have few interdependencies and the possibility of disabling individual
-parts during ./configure
.
-
-
-Small
-Both sources and objects should be small.
-
-
-Efficient
-It should have low CPU and memory usage.
-
-
-Useful
-It should avoid useless features that almost no one needs.
-
-
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-utils
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libswresample.html b/Externals/ffmpeg/shared/doc/libswresample.html
deleted file mode 100644
index 6df93990ef..0000000000
--- a/Externals/ffmpeg/shared/doc/libswresample.html
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
-
-
-
-
- Libswresample Documentation
-
-
-
-
-
-
-
-
- Libswresample Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libswresample library performs highly optimized audio resampling,
-rematrixing and sample format conversion operations.
-
-
Specifically, this library performs the following conversions:
-
-
- Resampling : is the process of changing the audio rate, for
-example from a high sample rate of 44100Hz to 8000Hz. Audio
-conversion from high to low sample rate is a lossy process. Several
-resampling options and algorithms are available.
-
- Format conversion : is the process of converting the type of
-samples, for example from 16-bit signed samples to unsigned 8-bit or
-float samples. It also handles packing conversion, when passing from
-packed layout (all samples belonging to distinct channels interleaved
-in the same buffer), to planar layout (all samples belonging to the
-same channel stored in a dedicated buffer or "plane").
-
- Rematrixing : is the process of changing the channel layout, for
-example from stereo to mono. When the input channels cannot be mapped
-to the output streams, the process is lossy, since it involves
-different gain factors and mixing.
-
-
-
Various other audio conversions (e.g. stretching and padding) are
-enabled through dedicated options.
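
As an illustrative sketch (not part of the original page), the following helper downmixes and resamples interleaved 44100 Hz stereo S16 audio to 8000 Hz mono float with the channel-layout based API of this era; the function name is made up for the example:

#include <stdint.h>
#include <libavutil/channel_layout.h>
#include <libavutil/error.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

/* Returns the number of output samples written, or a negative AVERROR code.
 * A final flushing call (swr_convert() with a NULL input) is omitted for brevity. */
static int downmix_to_8k_mono(float *dst, int dst_capacity,
                              const int16_t *src, int src_samples)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_MONO,   AV_SAMPLE_FMT_FLT, 8000,    /* output */
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,   /* input  */
            0, NULL);
    int ret;

    if (!swr)
        return AVERROR(ENOMEM);
    if ((ret = swr_init(swr)) < 0)
        goto end;

    /* Both sides use packed sample formats, so one plane pointer per side is enough. */
    {
        const uint8_t *in[1]  = { (const uint8_t *)src };
        uint8_t       *out[1] = { (uint8_t *)dst };
        ret = swr_convert(swr, out, dst_capacity, in, src_samples);
    }
end:
    swr_free(&swr);
    return ret;
}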
-
-
-
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-resampler ,
-libavutil
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/libswscale.html b/Externals/ffmpeg/shared/doc/libswscale.html
deleted file mode 100644
index 425df90758..0000000000
--- a/Externals/ffmpeg/shared/doc/libswscale.html
+++ /dev/null
@@ -1,89 +0,0 @@
-
-
-
-
-
-
- Libswscale Documentation
-
-
-
-
-
-
-
-
- Libswscale Documentation
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
-
The libswscale library performs highly optimized image scaling and
-colorspace and pixel format conversion operations.
-
-
Specifically, this library performs the following conversions:
-
-
 Rescaling : is the process of changing the video size. Several rescaling
options and algorithms are available. This is usually a lossy process.

 Pixel format conversion : is the process of converting the image format and
colorspace of the image. It also handles packing conversion between packed
and planar layouts.
-
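
As an illustrative sketch (not part of the original page), the following helper scales a 1920x1080 YUV420P picture down to 1280x720 and converts it to packed RGB24; the function name is made up, and the destination buffer allocated by av_image_alloc() must later be freed with av_freep(&dst_data[0]):

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>

/* On success returns the size of the allocated destination image,
 * a negative AVERROR code on failure. */
static int scale_to_720p_rgb(uint8_t *const src_data[4], const int src_linesize[4],
                             uint8_t *dst_data[4], int dst_linesize[4])
{
    struct SwsContext *sws = sws_getContext(1920, 1080, AV_PIX_FMT_YUV420P,
                                            1280,  720, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    int ret;

    if (!sws)
        return AVERROR(EINVAL);
    ret = av_image_alloc(dst_data, dst_linesize, 1280, 720, AV_PIX_FMT_RGB24, 1);
    if (ret >= 0)
        sws_scale(sws, (const uint8_t *const *)src_data, src_linesize,
                  0, 1080, dst_data, dst_linesize);
    sws_freeContext(sws);
    return ret;
}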
-
2 See Also# TOC
-
-
ffmpeg , ffplay , ffprobe , ffserver ,
-ffmpeg-scaler ,
-libavutil
-
-
-
-
3 Authors# TOC
-
-
The FFmpeg developers.
-
-
For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
-git log
in the FFmpeg source directory, or browsing the
-online repository at http://source.ffmpeg.org .
-
-
Maintainers for the specific components are listed in the file
-MAINTAINERS in the source code tree.
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/nut.html b/Externals/ffmpeg/shared/doc/nut.html
deleted file mode 100644
index 7b16df6a4d..0000000000
--- a/Externals/ffmpeg/shared/doc/nut.html
+++ /dev/null
@@ -1,211 +0,0 @@
-
-
-
-
-
-
- NUT
-
-
-
-
-
-
-
-
- NUT
-
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Description# TOC
-
NUT is a low overhead generic container format. It stores audio, video,
-subtitle and user-defined streams in a simple, yet efficient, way.
-
-
It was created by a group of FFmpeg and MPlayer developers in 2003
-and was finalized in 2008.
-
-
The official nut specification is at svn://svn.mplayerhq.hu/nut. In case of any
differences between this text and the official specification, the official
specification shall prevail.
-
-
-
2 Modes# TOC
NUT has some variants signaled by using the flags field in its main header.
-
-
-BROADCAST Extend the syncpoint to report the sender wallclock
PIPE Omit the syncpoints completely
-
-
-
-
2.1 BROADCAST# TOC
-
-
The BROADCAST variant provides a secondary time reference to facilitate
-detecting endpoint latency and network delays.
It assumes all the endpoint clocks are synchronized.
It is intended for real-time scenarios.
-
-
-
2.2 PIPE# TOC
-
-
The PIPE variant assumes NUT is used as a non-seekable intermediate container;
omitting syncpoints removes unneeded overhead and reduces the overall
memory usage.
-
-
-
3 Container-specific codec tags# TOC
-
-
-
3.1 Generic raw YUVA formats# TOC
-
-
Since many exotic planar YUVA pixel formats are not considered by
-the AVI/QuickTime FourCC lists, the following scheme is adopted for
-representing them.
-
-
The first two bytes can contain the values:
-Y1 = only Y
-Y2 = Y+A
-Y3 = YUV
-Y4 = YUVA
-
-
The third byte represents the width and height chroma subsampling
-values for the UV planes, that is the amount to shift the luma
-width/height right to find the chroma width/height.
-
-
The fourth byte is the number of bits used (8, 16, ...).
-
-
If the order of bytes is inverted, that means that each component has
-to be read big-endian.
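
Purely to illustrate the shift rule above (the helper below is hypothetical and not part of the specification text):

/* chroma_shift_w/chroma_shift_h are the shift values carried by the tag;
 * the chroma plane dimensions are the luma dimensions shifted right. */
static void chroma_dimensions(int luma_w, int luma_h,
                              int chroma_shift_w, int chroma_shift_h,
                              int *chroma_w, int *chroma_h)
{
    *chroma_w = luma_w >> chroma_shift_w;
    *chroma_h = luma_h >> chroma_shift_h;
}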
-
-
-
3.2 Raw Audio# TOC
-
-
-ALAW A-LAW
-ULAW MU-LAW
-P<type><interleaving><bits> little-endian PCM
-<bits><interleaving><type>P big-endian PCM
-
-
-
<type> is S for signed integer, U for unsigned integer, F for IEEE float.
<interleaving> is D for default (interleaved), P for planar.
<bits> is 8/16/24/32.
-
-
-
PFD[32] would for example be 32-bit little-endian IEEE float PCM with default (packed) interleaving
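
A small illustrative helper (hypothetical, not part of the specification text) that follows the byte layout above, storing the bit count as a raw byte value as in the PFD[32] example:

#include <libavutil/common.h>   /* MKTAG() */

/* type: 'S' (signed int), 'U' (unsigned int) or 'F' (IEEE float);
 * interleaving: 'D' (default) or 'P' (planar); bits: 8, 16, 24 or 32. */
static unsigned pcm_le_tag(int type, int interleaving, int bits)
{
    return MKTAG('P', type, interleaving, bits);
}

/* pcm_le_tag('S', 'D', 16) yields the tag for packed signed 16-bit little-endian PCM. */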
-
-
-
-
3.3 Subtitles# TOC
-
-
-UTF8 Raw UTF-8
-SSA[0] SubStation Alpha
-DVDS DVD subtitles
-DVBS DVB subtitles
-
-
-
-
3.4 Raw Data# TOC
-
-
-
-
-
3.5 Codecs# TOC
-
-
-3IV1 non-compliant MPEG-4 generated by old 3ivx
-ASV1 Asus Video
-ASV2 Asus Video 2
-CVID Cinepak
-CYUV Creative YUV
-DIVX non-compliant MPEG-4 generated by old DivX
-DUCK Truemotion 1
-FFV1 FFmpeg video 1
-FFVH FFmpeg Huffyuv
-H261 ITU H.261
-H262 ITU H.262
-H263 ITU H.263
-H264 ITU H.264
-HFYU Huffyuv
-I263 Intel H.263
-IV31 Indeo 3.1
-IV32 Indeo 3.2
-IV50 Indeo 5.0
-LJPG ITU JPEG (lossless)
-MJLS ITU JPEG-LS
-MJPG ITU JPEG
-MPG4 MS MPEG-4v1 (not ISO MPEG-4)
-MP42 MS MPEG-4v2
-MP43 MS MPEG-4v3
-MP4V ISO MPEG-4 Part 2 Video (from old encoders)
-mpg1 ISO MPEG-1 Video
-mpg2 ISO MPEG-2 Video
-MRLE MS RLE
-MSVC MS Video 1
-RT21 Indeo 2.1
-RV10 RealVideo 1.0
-RV20 RealVideo 2.0
-RV30 RealVideo 3.0
-RV40 RealVideo 4.0
-SNOW FFmpeg Snow
-SVQ1 Sorenson Video 1
-SVQ3 Sorenson Video 3
-theo Xiph Theora
-TM20 Truemotion 2.0
-UMP4 non-compliant MPEG-4 generated by UB Video MPEG-4
-VCR1 ATI VCR1
-VP30 VP 3.0
-VP31 VP 3.1
-VP50 VP 5.0
-VP60 VP 6.0
-VP61 VP 6.1
-VP62 VP 6.2
-VP70 VP 7.0
-WMV1 MS WMV7
-WMV2 MS WMV8
-WMV3 MS WMV9
-WV1F non-compliant MPEG-4 generated by ?
-WVC1 VC-1
-XVID non-compliant MPEG-4 generated by old Xvid
-XVIX non-compliant MPEG-4 generated by old Xvid with interlacing bug
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/doc/platform.html b/Externals/ffmpeg/shared/doc/platform.html
deleted file mode 100644
index fb57926de2..0000000000
--- a/Externals/ffmpeg/shared/doc/platform.html
+++ /dev/null
@@ -1,447 +0,0 @@
-
-
-
-
-
-
- Platform Specific Information
-
-
-
-
-
-
-
-
- Platform Specific Information
-
-
-
-
-
-
-
-
Table of Contents
-
-
-
-
-
-
1 Unix-like# TOC
-
-
Some parts of FFmpeg cannot be built with version 2.15 of the GNU
-assembler which is still provided by a few AMD64 distributions. To
-make sure your compiler really uses the required version of gas
-after a binutils upgrade, run:
-
-
-
$(gcc -print-prog-name=as) --version
-
-
-
If not, then you should install a different compiler that has no
-hard-coded path to gas. In the worst case pass --disable-asm
-to configure.
-
-
-
1.1 Advanced linking configuration# TOC
-
-
If you compiled FFmpeg libraries statically and you want to use them to
-build your own shared library, you may need to force PIC support (with
--enable-pic during FFmpeg configure) and add the following option
to your project LDFLAGS:

-Wl,-Bsymbolic
-
If your target platform requires position independent binaries, you should
pass the correct linking flag (e.g. -pie) to --extra-ldexeflags.
-
-
-
-
1.2 BSD# TOC

BSD make will not build FFmpeg; you need to install and use GNU Make (gmake).
-
-
-
1.3 (Open)Solaris# TOC
-
-
GNU Make is required to build FFmpeg, so you have to invoke gmake;
standard Solaris Make will not work. When building with a non-C99 front-end
(gcc, generic suncc) add either --extra-libs=/usr/lib/values-xpg6.o
or --extra-libs=/usr/lib/64/values-xpg6.o to the configure options,
since the libc is not C99-compliant by default. The probes performed by
configure may raise an exception leading to the death of configure itself
due to a bug in the system shell. Simply invoke a different shell such as
bash directly to work around this:

bash ./configure
-
-
1.4 Darwin (Mac OS X, iPhone)# TOC
-
-
The toolchain provided with Xcode is sufficient to build the basic
unaccelerated code.
-
-
Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
-https://github.com/FFmpeg/gas-preprocessor or
-https://github.com/yuvi/gas-preprocessor (currently outdated) to build the optimized
-assembly functions. Put the Perl script somewhere
-in your PATH, FFmpeg’s configure will pick it up automatically.
-
-
Mac OS X on amd64 and x86 requires yasm
to build most of the
-optimized assembly functions. Fink ,
-Gentoo Prefix ,
-Homebrew
-or MacPorts can easily provide it.
-
-
-
-
-
2 DOS# TOC
Using a cross-compiler is preferred for various reasons.
-http://www.delorie.com/howto/djgpp/linux-x-djgpp.html
-
-
-
-
-
3 OS/2# TOC
For information about compiling FFmpeg on OS/2 see
-http://www.edm2.com/index.php/FFmpeg .
-
-
-
-
4 Windows# TOC
-
-
To get help and instructions for building FFmpeg under Windows, check out
-the FFmpeg Windows Help Forum at http://ffmpeg.zeranoe.com/forum/ .
-
-
-
4.1 Native Windows compilation using MinGW or MinGW-w64# TOC
-
-
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
-toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
-http://www.mingw.org/ or http://mingw-w64.sourceforge.net/ .
-You can find detailed installation instructions in the download section and
-the FAQ.
-
-
Notes:
-
-
- Building natively using MSYS can be sped up by disabling implicit rules
in the Makefile by calling make -r instead of plain make. This
speedup is close to non-existent for normal one-off builds and is only
noticeable when running make for a second time (for example during
make install).
-
- In order to compile FFplay, you must have the MinGW development library
-of SDL and pkg-config
installed.
-
- By using ./configure --enable-shared
when configuring FFmpeg,
-you can build the FFmpeg libraries (e.g. libavutil, libavcodec,
-libavformat) as DLLs.
-
-
-
-
-
4.2 Microsoft Visual C++ or Intel C++ Compiler for Windows# TOC
-
-
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
-and wrapper, or with MSVC 2013 and ICL natively.
-
-
You will need the following prerequisites:
-
-
-
-
To set up a proper environment in MSYS, you need to run msys.bat
from
-the Visual Studio or Intel Compiler command prompt.
-
-
Place yasm.exe somewhere in your PATH. If using MSVC 2012 or
earlier, place c99wrap.exe and c99conv.exe somewhere in your
PATH as well.
-
-
Next, make sure any other headers and libs you want to use, such as zlib, are
located in a spot that the compiler can see. Do so by modifying the LIB
and INCLUDE environment variables to include the Windows-style
paths to these directories. Alternatively, you can try to use the
--extra-cflags/--extra-ldflags configure options. If using MSVC
2012 or earlier, place inttypes.h somewhere the compiler can see too.
-
-
Finally, run:
-
-
-
For MSVC:
-./configure --toolchain=msvc
-
-For ICL:
-./configure --toolchain=icl
-
-make
-make install
-
-
-
If you wish to compile shared libraries, add --enable-shared
to your
-configure options. Note that due to the way MSVC and ICL handle DLL imports and
-exports, you cannot compile static and shared libraries at the same time, and
-enabling shared libraries will automatically disable the static ones.
-
-
Notes:
-
-
-
-
-
4.2.1 Linking to FFmpeg with Microsoft Visual C++# TOC
-
-
If you plan to link with MSVC-built static libraries, you will need
to make sure you have Runtime Library set to Multi-threaded (/MT)
in your project’s settings.
-
-
You will need to define inline to something MSVC understands:
-
-
#define inline __inline
-
-
-
Also note that, as stated in the Microsoft Visual C++ section above, you will need
an MSVC-compatible inttypes.h.
-
-
If you plan on using import libraries created by dlltool, you must
set References to No (/OPT:NOREF) under the linker optimization
settings; otherwise the resulting binaries will fail during runtime.
This is not required when using import libraries generated by lib.exe.
-This issue is reported upstream at
-http://sourceware.org/bugzilla/show_bug.cgi?id=12633 .
-
-
To create import libraries that work with the /OPT:REF
option
-(which is enabled by default in Release mode), follow these steps:
-
-
- Open the Visual Studio Command Prompt .
-
-Alternatively, in a normal command line prompt, call vcvars32.bat
-which sets up the environment variables for the Visual C++ tools
-(the standard location for this file is something like
C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat).
-
- Enter the bin directory where the created LIB and DLL files
-are stored.
-
- Generate new import libraries with lib.exe:
-
-
-
lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib
-
-
Replace foo-version and foo with the respective library names.
-
-
-
-
-
4.3 Cross compilation for Windows with Linux# TOC
-
-
You must use the MinGW cross compilation tools available at
-http://www.mingw.org/ .
-
-
Then configure FFmpeg with the following options:
-
-
./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc-
-
-
(you can change the cross-prefix according to the prefix chosen for the
-MinGW tools).
-
-
Then you can easily test FFmpeg with Wine .
-
-
-
4.4 Compilation under Cygwin# TOC
-
-
Please use Cygwin 1.7.x, as the obsolete 1.5.x Cygwin versions lack
llrint() in their C library.
-
-
Install your Cygwin with all the "Base" packages, plus the
-following "Devel" ones:
-
-
binutils, gcc4-core, make, git, mingw-runtime, texinfo
-
-
-
In order to run FATE you will also need the following "Utils" packages:
-
-
-
If you want to build FFmpeg with additional libraries, download Cygwin
-"Devel" packages for Ogg and Vorbis from any Cygwin packages repository:
-
-
libogg-devel, libvorbis-devel
-
-
-
These library packages are only available from
-Cygwin Ports :
-
-
-
yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
-libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
-
-
-
The recommendation for x264 is to build it from source, as it evolves too
-quickly for Cygwin Ports to be up to date.
-
-
-
4.5 Cross compilation for Windows under Cygwin# TOC
-
-
With Cygwin you can create Windows binaries that do not need the cygwin1.dll.
-
-
Just install your Cygwin as explained before, plus these additional
-"Devel" packages:
-
-
gcc-mingw-core, mingw-runtime, mingw-zlib
-
-
-
and add some special flags to your configure invocation.
-
-
For a static build run
-
-
./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
-
-
-
and for a build with shared libraries
-
-
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
-
-
-
-
5 Plan 9# TOC
-
-
The native Plan 9 compiler
-does not implement all the C99 features needed by FFmpeg so the gcc
-port must be used. Furthermore, a few items missing from the C
-library and shell environment need to be fixed.
-
-
- GNU awk, grep, make, and sed
-
-Working packages of these tools can be found at
-ports2plan9 .
-They can be installed with 9front’s pkg
-utility by setting pkgpath
to
-http://ports2plan9.googlecode.com/files/
.
-
- Missing/broken head
and printf
commands
-
-Replacements adequate for building FFmpeg can be found in the
-compat/plan9
directory. Place these somewhere they will be
-found by the shell. These are not full implementations of the
-commands and are not suitable for general use.
-
- Missing C99 stdint.h
and inttypes.h
-
-Replacement headers are available from
-http://code.google.com/p/plan9front/issues/detail?id=152 .
-
- Missing or non-standard library functions
-
-Some functions in the C library are missing or incomplete. The
-gcc-apelibs-1207
package from
-ports2plan9
-includes an updated C library, but installing the full package gives
-unusable executables. Instead, keep the files from gccbin.tgz
-under /386/lib/gnu
. From the libc.a
archive in the
-gcc-apelibs-1207
package, extract the following object files and
-turn them into a library:
-
-
- strerror.o
- strtoll.o
- snprintf.o
- vsnprintf.o
- vfprintf.o
- _IO_getc.o
- _IO_putc.o
-
-
-Use the --extra-libs
option of configure
to inform the
-build system of this library.
-
- FPU exceptions enabled by default
-
-Unlike most other systems, Plan 9 enables FPU exceptions by default.
-These must be disabled before calling any FFmpeg functions. While the
-included tools will do this automatically, other users of the
-libraries must do it themselves.
-
-
-
-
-
- This document was generated on January 14, 2015 using makeinfo .
-
-
-
-
diff --git a/Externals/ffmpeg/shared/ff-prompt.bat b/Externals/ffmpeg/shared/ff-prompt.bat
deleted file mode 100644
index a0d6e09a37..0000000000
--- a/Externals/ffmpeg/shared/ff-prompt.bat
+++ /dev/null
@@ -1,35 +0,0 @@
-ECHO OFF
-REM FF Prompt 1.1
-REM Open a command prompt to run ffmpeg/ffplay/ffprobe
-REM Copyright (C) 2013 Kyle Schwarz
-
-TITLE FF Prompt
-
-IF NOT EXIST bin\ffmpeg.exe (
- CLS
- ECHO bin\ffmpeg.exe could not be found.
- GOTO:error
-)
-
-CD bin || GOTO:error
-PROMPT $G
-CLS
-ffmpeg -version
-SET PATH=%CD%;%PATH%
-ECHO.
-ECHO For help run: ffmpeg -h
-ECHO For formats run: ffmpeg -formats ^| more
-ECHO For codecs run: ffmpeg -codecs ^| more
-ECHO.
-ECHO Current directory is now: "%CD%"
-ECHO The bin directory has been added to PATH
-ECHO.
-
-CMD /F:ON /Q /K
-GOTO:EOF
-
-:error
- ECHO.
- ECHO Press any key to exit.
- PAUSE >nul
- GOTO:EOF
diff --git a/Externals/ffmpeg/shared/presets/ffprobe.xsd b/Externals/ffmpeg/shared/presets/ffprobe.xsd
deleted file mode 100644
index 226169e9af..0000000000
--- a/Externals/ffmpeg/shared/presets/ffprobe.xsd
+++ /dev/null
@@ -1,337 +0,0 @@
diff --git a/Externals/ffmpeg/shared/presets/libvpx-1080p.ffpreset b/Externals/ffmpeg/shared/presets/libvpx-1080p.ffpreset
deleted file mode 100644
index cf25932100..0000000000
--- a/Externals/ffmpeg/shared/presets/libvpx-1080p.ffpreset
+++ /dev/null
@@ -1,19 +0,0 @@
-vcodec=libvpx
-
-g=120
-lag-in-frames=16
-deadline=good
-cpu-used=0
-vprofile=1
-qmax=51
-qmin=11
-slices=4
-b=2M
-
-#ignored unless using -pass 2
-maxrate=24M
-minrate=100k
-auto-alt-ref=1
-arnr-maxframes=7
-arnr-strength=5
-arnr-type=centered
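
For reference (this note is not part of the preset file), a preset like this is typically applied through ffmpeg's preset options, e.g. with -vpre when the file is installed in one of the preset search directories, or with -fpre pointing at the file by path; an illustrative, unverified invocation:

ffmpeg -i input.mkv -vcodec libvpx -vpre 1080p -acodec libvorbis output.webm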
diff --git a/Externals/ffmpeg/shared/presets/libvpx-1080p50_60.ffpreset b/Externals/ffmpeg/shared/presets/libvpx-1080p50_60.ffpreset
deleted file mode 100644
index 4a88040d34..0000000000
--- a/Externals/ffmpeg/shared/presets/libvpx-1080p50_60.ffpreset
+++ /dev/null
@@ -1,19 +0,0 @@
-vcodec=libvpx
-
-g=120
-lag-in-frames=25
-deadline=good
-cpu-used=0
-vprofile=1
-qmax=51
-qmin=11
-slices=4
-b=2M
-
-#ignored unless using -pass 2
-maxrate=24M
-minrate=100k
-auto-alt-ref=1
-arnr-maxframes=7
-arnr-strength=5
-arnr-type=centered
diff --git a/Externals/ffmpeg/shared/presets/libvpx-360p.ffpreset b/Externals/ffmpeg/shared/presets/libvpx-360p.ffpreset
deleted file mode 100644
index f9729ba2bb..0000000000
--- a/Externals/ffmpeg/shared/presets/libvpx-360p.ffpreset
+++ /dev/null
@@ -1,18 +0,0 @@
-vcodec=libvpx
-
-g=120
-lag-in-frames=16
-deadline=good
-cpu-used=0
-vprofile=0
-qmax=63
-qmin=0
-b=768k
-
-#ignored unless using -pass 2
-maxrate=1.5M
-minrate=40k
-auto-alt-ref=1
-arnr-maxframes=7
-arnr-strength=5
-arnr-type=centered
diff --git a/Externals/ffmpeg/shared/presets/libvpx-720p.ffpreset b/Externals/ffmpeg/shared/presets/libvpx-720p.ffpreset
deleted file mode 100644
index e84cc150cd..0000000000
--- a/Externals/ffmpeg/shared/presets/libvpx-720p.ffpreset
+++ /dev/null
@@ -1,19 +0,0 @@
-vcodec=libvpx
-
-g=120
-lag-in-frames=16
-deadline=good
-cpu-used=0
-vprofile=0
-qmax=51
-qmin=11
-slices=4
-b=2M
-
-#ignored unless using -pass 2
-maxrate=24M
-minrate=100k
-auto-alt-ref=1
-arnr-maxframes=7
-arnr-strength=5
-arnr-type=centered
diff --git a/Externals/ffmpeg/shared/presets/libvpx-720p50_60.ffpreset b/Externals/ffmpeg/shared/presets/libvpx-720p50_60.ffpreset
deleted file mode 100644
index 8fce2bfb5a..0000000000
--- a/Externals/ffmpeg/shared/presets/libvpx-720p50_60.ffpreset
+++ /dev/null
@@ -1,19 +0,0 @@
-vcodec=libvpx
-
-g=120
-lag-in-frames=25
-deadline=good
-cpu-used=0
-vprofile=0
-qmax=51
-qmin=11
-slices=4
-b=2M
-
-#ignored unless using -pass 2
-maxrate=24M
-minrate=100k
-auto-alt-ref=1
-arnr-maxframes=7
-arnr-strength=5
-arnr-type=centered